diff --git a/agent/Dockerfile b/agent/Dockerfile
new file mode 100644
index 0000000..7c68855
--- /dev/null
+++ b/agent/Dockerfile
@@ -0,0 +1,36 @@
+# Dockerfile for cicd
+FROM harbor.inspur.local/system/alpine:3.17.2-yq
+MAINTAINER yinchongbing yinchongbing@inspur.com
+WORKDIR /app
+ADD cmd/ /app/
+ADD config/config.yaml /app/config/
+ADD deploy/docker/run.sh /app/run.sh
+
+ENV APP_ENV local
+ENV APP_DEBUG true
+ENV APP_LANGUAGE zh_CN
+ENV SERVER_HOST 0.0.0.0
+ENV SERVER_PORT 8899
+ENV POSTGRES_DATA_ENABLE false
+ENV POSTGRES_DATA_HOST postgres
+ENV POSTGRES_DATA_PORT 5432
+ENV POSTGRES_DATA_DATABASE postgres
+ENV POSTGRES_DATA_USERNAME postgres
+ENV POSTGRES_DATA_PASSWORD Mypostgres!23
+ENV POSTGRES_AUTH_ENABLE false
+ENV POSTGRES_AUTH_HOST postgres
+ENV POSTGRES_AUTH_PORT 5432
+ENV POSTGRES_AUTH_DATABASE auth
+ENV POSTGRES_AUTH_USERNAME postgres
+ENV POSTGRES_AUTH_PASSWORD Mypostgres!23
+ENV LOG_LEVEL 4
+ENV REDIS_EBABLE false
+ENV REDIS_HOST redis
+ENV REDIS_PORT 6379
+ENV REDIS_DATABASE 0
+ENV REDIS_PASSWORD ""
+
+RUN chmod +x run.sh && chmod +x cfn-schedule-agent
+EXPOSE $SERVER_PORT
+
+CMD ["./run.sh"]
\ No newline at end of file
diff --git a/agent/README.md b/agent/README.md
new file mode 100644
index 0000000..fd9fc8f
--- /dev/null
+++ b/agent/README.md
@@ -0,0 +1,151 @@
+# Go seed project
+This project is a scaffold built around the gin framework. Business development can be completed quickly on top of it, and it works out of the box 📦.
+
+Running over HTTP
+After pulling the code, run the following commands from the project root:
+
+# A local Go environment is required, version >= 1.18.1
+# Enabling GO111MODULE is recommended
+# go env -w GO111MODULE=on
+
+# Configure Go modules to pull the private repositories
+go env -w GOPRIVATE=git.inspur.com
+go env -w GOINSECURE=git.inspur.com
+go env -w GOPROXY="http://nexus.inspur.local/repository/go-public/,direct"
+
+# Tidy dependencies
+go mod tidy
+# Download dependencies into vendor/
+go mod vendor
+
+# On the first run, the example config (config/config.example.yaml) is copied to the config directory automatically (config/config.yaml)
+go run main.go
+
+# Once the service is up, use the commands below to hit the sample routes
+curl "http://127.0.0.1:8899/api/v1/hello-world"
+# {"code":0,"message":"OK","data":{"result":"hello gin-layout"},"cost":"6.151µs"}
+curl "http://127.0.0.1:8899/api/v1/hello-world?name=world"
+# {"code":0,"message":"OK","data":{"result":"hello world"},"cost":"6.87µs"}
+Deployment
+# Build the project (for other OS/arch targets, look up the cross-compilation options yourself)
+    CGO_ENABLED=0: disable CGO
+    GOOS=linux: the target operating system is Linux
+    GOARCH=amd64: the target CPU architecture is amd64
+    -a: force rebuilding of all packages
+    -installsuffix cgo: add an extra suffix to the package install directory
+    -o cfn-schedule-agent: write the compiled binary as cfn-schedule-agent
+    .: build the code in the current directory
+## Build commands on a Linux host:
+### linux:
+    CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -installsuffix cgo -o cfn-schedule-agent .
+### windows:
+    CGO_ENABLED=1 GOOS=windows GOARCH=amd64 go build -a -installsuffix cgo -o cfn-schedule-agent .
+
+## Build commands on a Windows host:
+### linux:
+    set GOOS=linux
+    set GOARCH=amd64
+    set CGO_ENABLED=0
+    go env -w GOARCH=amd64
+    go env -w GOOS=linux
+    go env -w CGO_ENABLED=0
+
+    go build -a -installsuffix cgo -o cfn-schedule-agent .
+### windows:
+    set GOOS=windows
+    set GOARCH=amd64
+    set CGO_ENABLED=0
+    go env -w GOARCH=amd64
+    go env -w GOOS=windows
+    go env -w CGO_ENABLED=0
+
+    go build -a -installsuffix cgo -o cfn-schedule-agent .
+
+
+# At runtime, specify the config file location explicitly, otherwise the configuration may not be found; restart after changing the configuration
+cmd/main.go -c="config file location (/home/config.yaml)"
+
+# Example nginx reverse proxy configuration
+server {
+listen 80;
+server_name api.xxx.com;
+location / {
+proxy_set_header Host $host;
+proxy_pass http://127.0.0.1:8899;
+}
+}
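The config file used above (config/config.yaml, added later in this change) writes its values as ${ENV_NAME||default} placeholders; at startup the loader resolves each placeholder from the process environment and falls back to the text after || when the variable is unset or empty. A minimal Go sketch of that convention, with a hypothetical expand helper — illustrative only, not the loader code from this change-set:

package main

import (
	"fmt"
	"os"
	"strings"
)

// expand resolves a "${NAME||default}" placeholder against the environment,
// returning the default when NAME is unset or empty.
func expand(value string) string {
	if !strings.HasPrefix(value, "${") || !strings.HasSuffix(value, "}") {
		return value // not a placeholder, use the literal value
	}
	body := value[2 : len(value)-1]
	key, def := body, ""
	if i := strings.Index(body, "||"); i >= 0 {
		key, def = body[:i], body[i+2:]
	}
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

func main() {
	os.Setenv("SERVER_PORT", "8899")
	fmt.Println(expand("${SERVER_PORT||8898}"))    // "8899" (taken from the environment)
	fmt.Println(expand("${SERVER_HOST||0.0.0.0}")) // "0.0.0.0" (default, variable unset)
}

Because the defaults live in two places, the ENV values baked into the Dockerfile above and the ||-defaults inside config/config.yaml are best kept in sync.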
+Directory structure
+.
+|——.gitignore
+|——go.mod
+|——go.sum
+|——main.go // project entry point, main package
+|——LICENSE
+|——README.md
+|——doc // design and user documentation
+|——db // database scripts
+|——hack // scripts used for build, install, analysis and other operations
+|——boot // project bootstrap directory
+| └──boot.go
+|——config // sample configuration files, mainly kept for local debugging
+| └──autoload // struct definitions for the configuration file
+| └──app.go
+| └──logger.go
+| └──mysql.go
+| └──redis.go
+| └──server.go
+| └──config.example.ini // example .ini configuration file
+| └──config.example.yaml // example .yaml configuration file
+| └──config.go // configuration initialization
+|——data // data-layer initialization
+| └──data.go
+| └──mysql.go
+| └──redis.go
+|——internal // code not exposed outside this service; most business logic lives here, and internal prevents accidental imports
+| └──controller // controllers
+| └──v1
+| └──auth.go // complete workflow demo, including database table operations
+| └──helloword.go // basic demo
+| └──base.go
+| └──middleware // middleware
+| └──cors.go
+| └──logger.go
+| └──recovery.go
+| └──requestCost.go
+| └──model // business data access
+| └──admin_users.go
+| └──base.go
+| └──pkg // packages for internal use
+| └──errors // error definitions
+| └──code.go
+| └──en-us.go
+| └──zh-cn.go
+| └──logger // log handling
+| └──logger.go
+| └──response // unified response output
+| └──response.go
+| └──routers // route definitions
+| └──apiRouter.go
+| └──router.go
+| └──service // business logic
+| └──auth.go
+| └──validator // request parameter validators
+| └──form // form parameter definitions
+| └──auth.go
+| └──validator.go
+|——pkg // packages that may be used by external code
+| └──convert // data type conversion
+| └──convert.go
+| └──utils // helper functions
+| └──utils.go
+Production notes
+When building for production, set the base path (base_path) in the .yaml config; all log files are written under {base_path}/gin-layout/logs/. The base path defaults to the directory the command is executed from.
+
+Other notes
+Packages used in this project
+Core: gin
+Configuration: gopkg.in/yaml.v3, gopkg.in/ini.v1
+Parameter validation: github.com/go-playground/validator/v10
+Logging: go.uber.org/zap, github.com/natefinch/lumberjack, github.com/lestrrat-go/file-rotatelogs
+Database: gorm.io/gorm, go-redis/v8
+Other dependencies are not listed one by one; see go.mod for the full list.
\ No newline at end of file
diff --git a/agent/boot/boot.go b/agent/boot/boot.go
new file mode 100644
index 0000000..3b9d0e3
--- /dev/null
+++ b/agent/boot/boot.go
@@ -0,0 +1,53 @@
+package boot
+
+import (
+	"flag"
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/agent"
+	"os"
+
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config"
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/data"
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/validator"
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log"
+	processManager "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager"
+)
+
+var (
+	configPath     string
+	printVersion   bool
+	Run            string
+	ProcessUnitDir string
+	AssetConfigDir string
+)
+
+func init() {
+	flag.StringVar(&Run, "r", "http", "执行命令默认运行http服务")
+	flag.StringVar(&configPath, "c", "", "请输入配置文件绝对路径")
+	flag.StringVar(&ProcessUnitDir, "process-unit-dir", "", "纳管进程配置文件目录绝对路径")
+	flag.StringVar(&AssetConfigDir, "asset-config-dir", "", "资产配置文件目录绝对路径")
+
+	flag.BoolVar(&printVersion, "version", false, "查看版本")
+	flag.Parse()
+
+	if printVersion {
+		// print the version number and exit
+		println(version)
+		os.Exit(0)
+	}
+
+	// 1. initialize configuration
+	config.InitConfig(configPath)
+	agent.InitAgentConfig(AssetConfigDir)
+
+	// 2. initialize the zap logger
+	log.InitLogger()
+
+	// 3. initialize the databases
+	data.InitData()
+
+	// 4. initialize the validator translations
+	validator.InitValidatorTrans("zh")
+
+	// 5. start the process manager
+	go processManager.Start(ProcessUnitDir)
+}
diff --git a/agent/boot/version.go b/agent/boot/version.go
new file mode 100644
index 0000000..9efc093
--- /dev/null
+++ b/agent/boot/version.go
@@ -0,0 +1,4 @@
+package boot
+
+// version is the current gin-layout version.
+const version = "0.2.3" diff --git a/agent/cfn-schedule-agent b/agent/cfn-schedule-agent new file mode 100644 index 0000000..51e613b Binary files /dev/null and b/agent/cfn-schedule-agent differ diff --git a/agent/cmd/.gitignore b/agent/cmd/.gitignore new file mode 100644 index 0000000..c96a04f --- /dev/null +++ b/agent/cmd/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore \ No newline at end of file diff --git a/agent/config/agent/agent.go b/agent/config/agent/agent.go new file mode 100644 index 0000000..1b9129e --- /dev/null +++ b/agent/config/agent/agent.go @@ -0,0 +1,63 @@ +package agent + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/bin_path" + "gopkg.in/yaml.v3" + "io/fs" + "os" +) + +type AgentConfig struct { + RID string `yaml:"rid" mapstructure:"rid"` + Token string `yaml:"token" mapstructure:"token"` + //AgentVersion string `yaml:"agent_version" mapstructure:"agent_version"` + Role string `yaml:"role" mapstructure:"role"` + NetInterface string `yaml:"net_interface" mapstructure:"net_interface"` + + ScheduleServer string `yaml:"schedule_server" mapstructure:"schedule_server"` + + Enterprise string `yaml:"enterprise" mapstructure:"enterprise"` + Type string `yaml:"type"` // 0:应用一体机 1:调度一体机 2:纳管一体机 +} + +var Agent = &AgentConfig{} + +func InitAgentConfig(assetConfigDir string) { + if assetConfigDir != "" { + bin_path.ASSETCONFIGDIR = assetConfigDir + } + + file, err := os.ReadFile(bin_path.ASSETCONFIGDIR + "/agent.yaml") + if err != nil { + panic("Failed to read configuration file:" + err.Error()) + } + + var config = &AgentConfig{} + yaml.Unmarshal(file, config) + + Agent = config + + //放入环境变量 + os.Setenv("CFN_SCHEDULE_AGENT_RID", config.RID) + os.Setenv("CFN_TELEGRAF_LOG_FILE", bin_path.TELEGRAFLOG) + + // 如果 path 路径不存在,会有 err,然后通过 IsNotExist 判定文件路径是否存在,如果 true 则不存在,注意用 os.ModePerm 这样文件是可以写入的 + if _, err := os.Stat(bin_path.TELEGRAFLOG); os.IsNotExist(err) { + // mkdir 创建目录,mkdirAll 可创建多层级目录 + os.MkdirAll(bin_path.TELEGRAFLOG, os.ModePerm) + } + +} + +func LoadAgentInfo(config AgentConfig) { + // 更新内存 + Agent = &config + + go func() { + out, _ := yaml.Marshal(config) + err := os.WriteFile(bin_path.ASSETCONFIGDIR+"/agent.yaml", out, fs.ModePerm) + if err != nil { + panic("Failed to write configuration file:" + err.Error()) + } + }() +} diff --git a/agent/config/agent/agent.yaml b/agent/config/agent/agent.yaml new file mode 100644 index 0000000..bfb344e --- /dev/null +++ b/agent/config/agent/agent.yaml @@ -0,0 +1,7 @@ +rid: "DZ210900214" +token: "afsdfasfasdccc " +role: "Cache" # 枚举:Cache or Normal +net_interface: "eth0" # 示例:eth0 for linux 以太网 4 for windows +schedule_server: "localhost" +enterprise: "SDLC" +type: 1 # 1:应用一体机 2:调度一体机 3:纳管一体机 \ No newline at end of file diff --git a/agent/config/app.go b/agent/config/app.go new file mode 100644 index 0000000..01f277f --- /dev/null +++ b/agent/config/app.go @@ -0,0 +1,26 @@ +package config + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/convert" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils" +) + +type AppConfig struct { + AppEnv string `ini:"app_env" yaml:"app_env" mapstructure:"app_env"` + Debug bool `ini:"debug" yaml:"debug" mapstructure:"debug"` + Language string `ini:"language" yaml:"language" mapstructure:"language"` + StaticBasePath string `ini:"base_path" yaml:"base_path" mapstructure:"base_path"` +} + +var App = AppConfig{ + AppEnv: "local", + Debug: true, + Language: "zh_CN", + StaticBasePath: getDefaultPath(), +} + +func getDefaultPath() (path string) { + path, _ = 
utils.GetDefaultPath() + path = convert.GetString(utils.If(path != "", path, "/tmp")) + return +} diff --git a/agent/config/bin_path/bin_path.go b/agent/config/bin_path/bin_path.go new file mode 100644 index 0000000..5031e9f --- /dev/null +++ b/agent/config/bin_path/bin_path.go @@ -0,0 +1,28 @@ +package bin_path + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils" + "path/filepath" +) + +var ( + BINDIR = "/root/cfn-schedule-agent/bin" + UPGRADEDIR = "/root/cfn-schedule-agent/upgrade" + ASSETCONFIGDIR = "" + AGENTLOG = "" + AGENTUPGRADELOG = "" + TELEGRAFLOG = "" + UPGRADEPLAN = "" +) + +func init() { + BINDIR, _ = utils.GetCurrentPath() + UPGRADEDIR = filepath.Join(BINDIR, "../upgrade") + AGENTUPGRADELOG = filepath.Join(UPGRADEDIR, "/upgrade.log") + TELEGRAFLOG = filepath.Join(BINDIR, "/data-collector/telegraf/log/", "telegraf.log") + AGENTLOG = filepath.Join(BINDIR, "/logs/", config.Config.Logger.Filename) + + UPGRADEPLAN = filepath.Join(UPGRADEDIR, "/upgradeplan") + +} diff --git a/agent/config/bin_path/bin_path_linux.go b/agent/config/bin_path/bin_path_linux.go new file mode 100644 index 0000000..7dc6eca --- /dev/null +++ b/agent/config/bin_path/bin_path_linux.go @@ -0,0 +1,5 @@ +package bin_path + +func init() { + ASSETCONFIGDIR = "/etc/cfn-schedule-agent" +} diff --git a/agent/config/bin_path/bing_path_windows.go b/agent/config/bin_path/bing_path_windows.go new file mode 100644 index 0000000..eb4215c --- /dev/null +++ b/agent/config/bin_path/bing_path_windows.go @@ -0,0 +1,5 @@ +package bin_path + +func init() { + ASSETCONFIGDIR = "C:\\cfn-schedule-agent" +} diff --git a/agent/config/config.go b/agent/config/config.go new file mode 100644 index 0000000..9d35f4f --- /dev/null +++ b/agent/config/config.go @@ -0,0 +1,111 @@ +package config + +import ( + "os" + "path/filepath" + "sync" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils" + "github.com/spf13/viper" +) + +// Conf 配置项主结构体 +// mapstructure(配置文件属性名称无法与类属性名称直接对应) +type Conf struct { + AppConfig `yaml:"app" mapstructure:"app"` + Server ServerConfig `yaml:"server" mapstructure:"server"` + PostgreSql PostgreSqlConfigMap `yaml:"postgres" mapstructure:"postgres"` + Redis RedisConfig `yaml:"redis" mapstructure:"redis"` + Nats NatsConfig `yaml:"nats_config" mapstructure:"nats_config"` + //Minio MinioConfig `yaml:"minio" mapstructure:"minio"` + Logger LoggerConfig `yaml:"logger" mapstructure:"logger"` + Schedule ScheduleServer `yaml:"schedule" mapstructure:"schedule"` + AgentVersion string `yaml:"agent_version" mapstructure:"agent_version"` + ZombieCleaner bool `yaml:"zombie_cleaner" mapstructure:"zombie_cleaner"` +} + +var Config = &Conf{ + AppConfig: App, + Server: Server, + PostgreSql: PostgreSql, + Redis: Redis, + Nats: Nats, + //Minio: Minio, + Logger: Logger, + Schedule: Schedule, +} + +var once sync.Once + +func InitConfig(configPath string) { + once.Do(func() { + // 加载 .yaml 配置 + loadYaml(configPath) + }) +} + +// todo 环境变量注入配置 or 与nacos集成 +func loadYaml(configPath string) { + var yamlConfig string + if configPath == "" { + runDirectory, _ := utils.GetCurrentPath() + yamlConfig = filepath.Join(runDirectory, "/config.yaml") + } else { + yamlConfig = filepath.Join(configPath) + } + + viper.SetConfigFile(yamlConfig) + viper.SetConfigType("yaml") + err := viper.ReadInConfig() + if err != nil { + panic("Failed to read configuration file:" + err.Error()) + } + for _, key := range viper.AllKeys() { + value := viper.GetString(key) + realValue := 
expandValueEnv(value) + if value != realValue { + viper.Set(key, realValue) + } + } + + err = viper.Unmarshal(Config) + if err != nil { + panic("Failed to load configuration:" + err.Error()) + } + +} + +func expandValueEnv(value string) (realValue string) { + realValue = value + + vLen := len(value) + // 3 = ${} + if vLen < 3 { + return + } + // Need start with "${" and end with "}", then return. + if value[0] != '$' || value[1] != '{' || value[vLen-1] != '}' { + return + } + + key := "" + defaultV := "" + // value start with "${" + for i := 2; i < vLen; i++ { + if value[i] == '|' && (i+1 < vLen && value[i+1] == '|') { + key = value[2:i] + defaultV = value[i+2 : vLen-1] // other string is default value. + break + } else if value[i] == '}' { + key = value[2:i] + break + } + } + + realValue = os.Getenv(key) + if realValue == "" { + realValue = defaultV + } + + return +} diff --git a/agent/config/config.yaml b/agent/config/config.yaml new file mode 100644 index 0000000..e68390e --- /dev/null +++ b/agent/config/config.yaml @@ -0,0 +1,60 @@ +app: + app_env: ${APP_ENV||local} + debug: ${APP_DEBUG||true} + language: ${APP_LANGUAGE||zh_CN} +# base_path: "" +server: + host: ${SERVER_HOST||0.0.0.0} + port: ${SERVER_PORT||8898} + +agent_version: ${AGENT_VERSION||v1.0.0} +zombie_cleaner: ${POSTGRES_DATA_ENABLE||true} + +schedule: + port: ${SCHEDULE_PORT||9090} + heart_beat: 30 + protocol: http + auth: + type: none + +postgres: + data: + enable: ${POSTGRES_DATA_ENABLE||false} + host: ${POSTGRES_DATA_HOST||10.110.63.138} + port: ${POSTGRES_DATA_PORT||5432} + database: ${POSTGRES_DATA_DATABASE||postgres} + username: ${POSTGRES_DATA_USERNAME||postgres} + password: ${POSTGRES_DATA_PASSWORD||Mypostgres!23} + log_level: ${LOG_LEVEL||4} + auth: + enable: ${POSTGRES_AUTH_ENABLE||false} + host: ${POSTGRES_AUTH_HOST||10.110.63.138} + port: ${POSTGRES_AUTH_PORT||5432} + database: ${POSTGRES_AUTH_DATABASE||auth} + username: ${POSTGRES_AUTH_USERNAME||postgres} + password: ${POSTGRES_AUTH_PASSWORD||Mypostgres!23} + log_level: ${LOG_LEVEL||4} +redis: + enable: ${REDIS_ENABLE||false} + host: ${REDIS_HOST||127.0.0.1} + port: ${REDIS_POST||6379} + password: ${REDIS_PASSWORD||} + database: ${REDIS_DATABASE||0} +nats_config: + # 10.110.63.81:30529 192.168.12.110 + url: ${NATS_URL||nats://10.110.63.81:30529} + user: ${NATS_USER||admin} + password: ${NATS_PASSWORD||T0pS3cr3tFGThjjds56} + + +logger: + default_division: ${LOGGER_DEFAULT_DIVISION||time} + file_name: ${LOGGER_FILE_NAME||cfn-schedule-agent.sys.log} + division_time: + max_age: ${LOGGER_DIVISION_TIME_MAX_AGE||15} + rotation_time: ${LOGGER_DIVISION_TIME_ROTATION_TIME||24} + division_size: + max_size: ${LOGGER_DIVISION_SIZE_MAX_SIZE||20} + max_backups: ${LOGGER_DIVISION_SIZE_MAX_BACKUPS||15} + max_age: ${LOGGER_DIVISION_SIZE_MAX_AGE||15} + compress: ${LOGGER_DIVISION_SIZE_COMPRESS||false} diff --git a/agent/config/logger.go b/agent/config/logger.go new file mode 100644 index 0000000..e8de354 --- /dev/null +++ b/agent/config/logger.go @@ -0,0 +1,35 @@ +package config + +type DivisionTime struct { + MaxAge int `ini:"max_age" yaml:"max_age" mapstructure:"max_age"` // 保留旧文件的最大天数,单位天 + RotationTime int `ini:"rotation_time" yaml:"rotation_time" mapstructure:"rotation_time"` // 多长时间切割一次文件,单位小时 +} + +type DivisionSize struct { + MaxSize int `ini:"max_size" yaml:"max_size" mapstructure:"max_size"` // 在进行切割之前,日志文件的最大大小(以MB为单位) + MaxBackups int `ini:"max_backups" yaml:"max_backups" mapstructure:"max_backups"` // 保留旧文件的最大个数 + MaxAge int `ini:"max_age" yaml:"max_age" 
mapstructure:"max_age"` // 保留旧文件的最大天数 + Compress bool `ini:"compress" yaml:"compress"` // 是否压缩/归档旧文件 +} + +type LoggerConfig struct { + DefaultDivision string `ini:"default_division" yaml:"default_division" mapstructure:"default_division"` + Filename string `ini:"file_name" yaml:"file_name" mapstructure:"file_name"` + DivisionTime DivisionTime `ini:"division_time" yaml:"division_time" mapstructure:"division_time"` + DivisionSize DivisionSize `ini:"division_size" yaml:"division_size" mapstructure:"division_size"` +} + +var Logger = LoggerConfig{ + DefaultDivision: "time", // time 按时间切割,默认一天, size 按文件大小切割 + Filename: "sys.log", + DivisionTime: DivisionTime{ + MaxAge: 15, + RotationTime: 24, + }, + DivisionSize: DivisionSize{ + MaxSize: 2, + MaxBackups: 2, + MaxAge: 15, + Compress: false, + }, +} diff --git a/agent/config/minio.go b/agent/config/minio.go new file mode 100644 index 0000000..248ea2d --- /dev/null +++ b/agent/config/minio.go @@ -0,0 +1,13 @@ +package config + +type MinioConfig struct { + Endpoint string `ini:"endpoint" yaml:"endpoint"` + AccessKey string `ini:"access_key" yaml:"access_key" mapstructure:"access_key"` + SecretAccessKey string `ini:"secret_access_key" yaml:"secret_access_key" mapstructure:"secret_access_key"` +} + +var Minio = MinioConfig{ + Endpoint: "192.168.12.78:208670", + AccessKey: "xxxxxxxx", + SecretAccessKey: "yyyyyyyy", +} diff --git a/agent/config/nats.go b/agent/config/nats.go new file mode 100644 index 0000000..24f2163 --- /dev/null +++ b/agent/config/nats.go @@ -0,0 +1,20 @@ +package config + +type NatsConfig struct { + Url string `ini:"url" yaml:"url"` + User string `ini:"user" yaml:"user"` + Password string `ini:"password" yaml:"password"` + + //ToAgentUnicastSubject string `ini:"to_agent_unicast_subject" yaml:"to_agent_unicast_subject"` + //ToAgentBroadcastSubject string `ini:"to_agent_broadcast_subject" yaml:"to_agent_broadcast_subject"` + //ToScheduleSubject string `ini:"to_schedule_subject" yaml:"to_schedule_subject"` +} + +var Nats = NatsConfig{ + Url: "nats://192.168.12.110:30529", + User: "admin", + Password: "T0pS3cr3tFGThjjds56", + //ToAgentUnicastSubject: "cfn_to_agent_unicast_{rid}", + //ToAgentBroadcastSubject: "cfn_to_agent_broadcast", + //ToScheduleSubject: "cfn_to_schedulecfn_to_schedule", +} diff --git a/agent/config/postgres.go b/agent/config/postgres.go new file mode 100644 index 0000000..e5bf6ce --- /dev/null +++ b/agent/config/postgres.go @@ -0,0 +1,40 @@ +package config + +type PostgreSqlConfig struct { + Enable bool `ini:"enable" yaml:"enable"` + Host string `ini:"host" yaml:"host"` + Username string `ini:"username" yaml:"username"` + Password string `ini:"password" yaml:"password"` + Port uint16 `ini:"port" yaml:"port"` + Database string `ini:"database" yaml:"database"` + TablePrefix string `ini:"table_prefix" yaml:"table_prefix" mapstructure:"table_prefix"` + LogLevel int `ini:"log_level" yaml:"log_level" mapstructure:"log_level"` + PrintSql bool `ini:"print_sql" yaml:"print_sql" mapstructure:"print_sql"` +} + +type PostgreSqlConfigMap map[string]PostgreSqlConfig + +var PostgreSql = PostgreSqlConfigMap{ + "data": { + Enable: false, + Host: "127.0.0.1", + Username: "root", + Password: "root1234", + Port: 3306, + Database: "test", + TablePrefix: "", + LogLevel: 4, + PrintSql: false, + }, + "auth": { + Enable: false, + Host: "127.0.0.1", + Username: "root", + Password: "root1234", + Port: 3306, + Database: "test", + TablePrefix: "", + LogLevel: 4, + PrintSql: false, + }, +} diff --git a/agent/config/redis.go 
b/agent/config/redis.go new file mode 100644 index 0000000..17a0ea9 --- /dev/null +++ b/agent/config/redis.go @@ -0,0 +1,17 @@ +package config + +type RedisConfig struct { + Enable bool `ini:"enable" yaml:"enable"` + Host string `ini:"host" yaml:"host"` + Port string `ini:"port" yaml:"port"` + Password string `ini:"password" yaml:"password"` + Database int `ini:"database" yaml:"database"` +} + +var Redis = RedisConfig{ + Enable: false, + Host: "127.0.0.1", + Password: "root1234", + Port: "6379", + Database: 0, +} diff --git a/agent/config/schedule_server.go b/agent/config/schedule_server.go new file mode 100644 index 0000000..0c9d12b --- /dev/null +++ b/agent/config/schedule_server.go @@ -0,0 +1,42 @@ +package config + +// The valid auth strategies and values for cookie handling +const ( + // These constants are used for external services auth (Prometheus ...) ; + AuthTypeBasic = "basic" + AuthTypeBearer = "bearer" + AuthTypeNone = "none" +) + +// Auth provides authentication data for external services +type Auth struct { + CAFile string `yaml:"ca_file" mapstructure:"ca_file"` + InsecureSkipVerify bool `yaml:"insecure_skip_verify" mapstructure:"insecure_skip_verify"` + Password string `yaml:"password" mapstructure:"password"` + Token string `yaml:"token" mapstructure:"token"` + Type string `yaml:"type" mapstructure:"type"` + Username string `yaml:"username" mapstructure:"username"` +} + +func (a *Auth) Obfuscate() { + a.Token = "xxx" + a.Password = "xxx" + a.Username = "xxx" + a.CAFile = "xxx" +} + +type ScheduleServer struct { + Port int `yaml:"port,omitempty" mapstructure:"port"` + Protocol string `yaml:"protocol,omitempty" mapstructure:"protocol"` // http https socket websocket + Auth Auth `yaml:"auth,omitempty" mapstructure:"auth"` + HeartBeat int64 `ini:"heart_beat" yaml:"heart_beat" mapstructure:"heart_beat"` +} + +var Schedule = ScheduleServer{ + Port: 1234, + Protocol: "http", + Auth: Auth{ + Type: AuthTypeNone, + }, + HeartBeat: 30, +} diff --git a/agent/config/server.go b/agent/config/server.go new file mode 100644 index 0000000..3b7a926 --- /dev/null +++ b/agent/config/server.go @@ -0,0 +1,12 @@ +package config + +// ServerConfig 定义项目配置 +type ServerConfig struct { + Host string `ini:"host" yaml:"host" mapstructure:"host"` + Port uint16 `ini:"port" yaml:"port" mapstructure:"port"` +} + +var Server = ServerConfig{ + Host: "127.0.0.1", + Port: 9999, +} diff --git a/agent/data/data.go b/agent/data/data.go new file mode 100644 index 0000000..c2189ff --- /dev/null +++ b/agent/data/data.go @@ -0,0 +1,19 @@ +package data + +import ( + c "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + "sync" +) + +var once sync.Once + +func InitData() { + once.Do(func() { + initPostgres() + + if c.Config.Redis.Enable { + // 初始化 redis + initRedis() + } + }) +} diff --git a/agent/data/postgres.go b/agent/data/postgres.go new file mode 100644 index 0000000..8665a4f --- /dev/null +++ b/agent/data/postgres.go @@ -0,0 +1,73 @@ +package data + +import ( + "fmt" + c "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + "gorm.io/driver/postgres" + "gorm.io/gorm" + "gorm.io/gorm/logger" + "gorm.io/gorm/schema" +) + +var PostgreSqlDB = make(map[string]*gorm.DB) + +type Writer interface { + Printf(string, ...interface{}) +} + +type WriterLog struct { +} + +func (w WriterLog) Printf(format string, args ...interface{}) { + log.Logger.Sugar().Infof(format, args...) 
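+	// Printf satisfies the Writer interface above; the WriterLog value is
+	// handed to gorm's logger.New in initPostgres below, so gorm's SQL logs
+	// are forwarded to the shared zap logger instead of stdout.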
+} + +func initPostgres() { + var err error + postgreSqlConfigMap := c.Config.PostgreSql + if c.Config.PostgreSql != nil && len(postgreSqlConfigMap) > 0 { + for name, postgreSqlConfig := range postgreSqlConfigMap { + if !postgreSqlConfig.Enable { + continue + } + + var writerLog WriterLog + if postgreSqlConfig.PrintSql { + writerLog = WriterLog{} + } + logConfig := logger.New( + writerLog, + logger.Config{ + SlowThreshold: 0, // 慢 SQL 阈值 + LogLevel: logger.LogLevel(postgreSqlConfig.LogLevel), // 日志级别 + IgnoreRecordNotFoundError: false, // 忽略ErrRecordNotFound(记录未找到)错误 + Colorful: false, // 是否启用彩色打印 + }, + ) + + configs := &gorm.Config{ + NamingStrategy: schema.NamingStrategy{ + TablePrefix: postgreSqlConfig.TablePrefix, // 表名前缀 + // SingularTable: true, // 使用单数表名 + }, + Logger: logConfig, + } + + dsn := fmt.Sprintf("host=%s port=%d user=%s dbname=%s password=%s", + postgreSqlConfig.Host, + postgreSqlConfig.Port, + postgreSqlConfig.Username, + postgreSqlConfig.Database, + postgreSqlConfig.Password, + ) + + PostgreSqlDB[name], err = gorm.Open(postgres.Open(dsn), configs) + + if err != nil { + panic("postgres connection failed:" + err.Error()) + } + } + } + +} diff --git a/agent/data/redis.go b/agent/data/redis.go new file mode 100644 index 0000000..ab37d3f --- /dev/null +++ b/agent/data/redis.go @@ -0,0 +1,23 @@ +package data + +import ( + "context" + c "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + "github.com/go-redis/redis/v8" +) + +var Rdb *redis.Client + +func initRedis() { + Rdb = redis.NewClient(&redis.Options{ + Addr: c.Config.Redis.Host + ":" + c.Config.Redis.Port, + Password: c.Config.Redis.Password, + DB: c.Config.Redis.Database, + }) + var ctx = context.Background() + _, err := Rdb.Ping(ctx).Result() + + if err != nil { + panic("Redis connection failed:" + err.Error()) + } +} diff --git a/agent/deploy/docker/run.sh b/agent/deploy/docker/run.sh new file mode 100644 index 0000000..54a8421 --- /dev/null +++ b/agent/deploy/docker/run.sh @@ -0,0 +1,44 @@ +#!/bin/sh + +# 从环境变量获取值,来更改配置文件 + +NAME=$APP_ENV yq -i '.app.app_env=env(NAME)' config/config.yaml +NAME=$APP_DEBUG yq -i '.app.debug=env(NAME)' config/config.yaml +NAME=$APP_LANGUAGE yq -i '.app.language=env(NAME)' config/config.yaml +NAME=$SERVER_HOST yq -i '.server.host=env(NAME)' config/config.yaml +NAME=$SERVER_PORT yq -i '.server.port=env(NAME)' config/config.yaml +NAME=$SCHEDULE_PORT yq -i '.schedule.port=env(NAME)' config/config.yaml + +if [ $POSTGRES_DATA_ENABLE = true ] ;then + NAME=$POSTGRES_DATA_ENABLE yq -i '.postgres.data.enable=env(NAME)' config/config.yaml + NAME=$POSTGRES_DATA_HOST yq -i '.postgres.data.host=env(NAME)' config/config.yaml + NAME=$POSTGRES_DATA_PORT yq -i '.postgres.data.port=env(NAME)' config/config.yaml + NAME=$POSTGRES_DATA_DATABASE yq -i '.postgres.data.database=env(NAME)' config/config.yaml + NAME=$POSTGRES_DATA_USERNAME yq -i '.postgres.data.username=env(NAME)' config/config.yaml + NAME=$LOG_LEVEL yq -i '.postgres.data.log_level=env(NAME)' config/config.yaml + if [ -n "$POSTGRES_DATA_PASSWORD" ] ;then + NAME=$POSTGRES_DATA_PASSWORD yq -i '.postgres.data.password=env(NAME)' config/config.yaml + fi +fi +if [ $POSTGRES_AUTH_ENABLE = true ] ;then + NAME=$POSTGRES_AUTH_ENABLE yq -i '.postgres.auth.enable=env(NAME)' config/config.yaml + NAME=$POSTGRES_AUTH_HOST yq -i '.postgres.auth.host=env(NAME)' config/config.yaml + NAME=$POSTGRES_AUTH_PORT yq -i '.postgres.auth.port=env(NAME)' config/config.yaml + NAME=$POSTGRES_AUTH_DATABASE yq -i '.postgres.auth.database=env(NAME)' config/config.yaml + 
NAME=$POSTGRES_AUTH_USERNAME yq -i '.postgres.auth.username=env(NAME)' config/config.yaml + NAME=$LOG_LEVEL yq -i '.postgres.auth.log_level=env(NAME)' config/config.yaml + if [ -n "$POSTGRES_AUTH_PASSWORD" ] ;then + NAME=$POSTGRES_AUTH_PASSWORD yq -i '.postgres.auth.password=env(NAME)' config/config.yaml + fi +fi + +if [ $REDIS_EBABLE = true ] ;then + NAME=$REDIS_EBABLE yq -i '.redis.enable=env(NAME)' config/config.yaml + NAME=$REDIS_HOST yq -i '.redis.host=env(NAME)' config/config.yaml + NAME=$REDIS_PORT yq -i '.redis.port=env(NAME)' config/config.yaml + NAME=$REDIS_DATABASE yq -i '.redis.database=env(NAME)' config/config.yaml + if [ -n "$REDIS_PASSWORD" ] ;then + NAME=$REDIS_PASSWORD yq -i '.redis.password=env(NAME)' config/config.yaml + fi +fi +./cfn-schedule-agent -c config/config.yaml diff --git a/agent/go.mod b/agent/go.mod new file mode 100644 index 0000000..369ad1b --- /dev/null +++ b/agent/go.mod @@ -0,0 +1,162 @@ +module git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent + +go 1.20 + +require ( + github.com/StackExchange/wmi v1.2.1 + github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/docker/docker v24.0.6+incompatible + github.com/elastic/go-windows v1.0.1 + github.com/gin-contrib/cors v1.3.1 + github.com/gin-gonic/gin v1.8.1 + github.com/go-playground/locales v0.14.0 + github.com/go-playground/universal-translator v0.18.0 + github.com/go-playground/validator/v10 v10.11.1 + github.com/go-redis/redis/v8 v8.11.5 + github.com/golang/glog v1.1.0 + github.com/guoyk93/rg v1.0.1 + github.com/influxdata/telegraf v1.28.2 + github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 + github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible + github.com/natefinch/lumberjack v2.0.0+incompatible + github.com/nats-io/nats.go v1.30.2 + github.com/prometheus-community/pro-bing v0.3.0 + github.com/prometheus/procfs v0.12.0 + github.com/robfig/cron/v3 v3.0.1 + github.com/spf13/viper v1.17.0 + github.com/stretchr/testify v1.8.4 + go.uber.org/zap v1.24.0 + golang.org/x/crypto v0.14.0 + golang.org/x/net v0.17.0 + golang.org/x/sys v0.14.0 + golang.org/x/text v0.13.0 + gopkg.in/yaml.v3 v3.0.1 + gorm.io/driver/postgres v1.5.2 + gorm.io/gorm v1.25.0 + gorm.io/plugin/soft_delete v1.1.0 + howett.net/plist v1.0.0 +) + +require ( + cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect + github.com/awnumar/memcall v0.1.2 // indirect + github.com/awnumar/memguard v0.22.3 // indirect + github.com/aws/aws-sdk-go-v2 v1.21.0 // indirect + github.com/aws/aws-sdk-go-v2/config v1.18.42 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.40 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.43 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.14.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.22.0 // indirect + github.com/aws/smithy-go v1.14.2 // indirect + github.com/benbjohnson/clock v1.3.3 // indirect + github.com/beorn7/perks v1.0.1 // 
indirect + github.com/blues/jsonata-go v1.5.4 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/compose-spec/compose-go v1.16.0 // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/fatih/color v1.15.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v5 v5.0.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/cel-go v0.14.1-0.20230424164844-d39523c445fc // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/google/uuid v1.4.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgx/v5 v5.3.1 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + github.com/jinzhu/now v1.1.5 // indirect + github.com/jonboulle/clockwork v0.2.2 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.0 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect + github.com/leodido/go-urn v1.2.1 // indirect + github.com/lestrrat-go/strftime v1.0.6 // indirect + github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mattn/go-sqlite3 v1.14.16 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/minio/highwayhash v1.0.2 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/naoina/go-stringutil v0.1.0 // indirect + github.com/nats-io/jwt/v2 v2.4.1 // indirect + github.com/nats-io/nats-server/v2 v2.9.9 // indirect + github.com/nats-io/nkeys v0.4.5 // indirect + github.com/nats-io/nuid v1.0.1 // indirect + github.com/onsi/gomega v1.27.6 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc4 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + 
github.com/prometheus/prometheus v0.46.0 // indirect + github.com/sagikazarmark/locafero v0.3.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/shirou/gopsutil/v3 v3.23.8 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.10.0 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/ugorji/go/codec v1.2.7 // indirect + github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.opencensus.io v0.24.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/oauth2 v0.12.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.13.0 // indirect + google.golang.org/api v0.143.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect + google.golang.org/grpc v1.58.2 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gotest.tools/v3 v3.5.1 // indirect +) diff --git a/agent/go.sum b/agent/go.sum new file mode 100644 index 0000000..a910a75 --- /dev/null +++ b/agent/go.sum @@ -0,0 +1,907 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod 
h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +collectd.org v0.5.0 h1:y4uFSAuOmeVhG3GCRa3/oH+ysePfO/+eGJNfd0Qa3d8= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= +github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= 
+github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/antchfx/jsonquery v1.3.3 h1:zjZpbnZhYng3uOAbIfdNq81A9mMEeuDJeYIpeKpZ4es= +github.com/antchfx/xmlquery v1.3.17 h1:d0qWjPp/D+vtRw7ivCwT5ApH/3CkQU8JOeo3245PpTk= +github.com/antchfx/xpath v1.2.5-0.20230505064641-588960cceeac h1:Et7H7mEPWuivbFEXi3dWa8hobnvF380TS2mq7JmgjEI= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/awnumar/memcall v0.1.2 h1:7gOfDTL+BJ6nnbtAp9+HQzUFjtP1hEseRQq8eP055QY= +github.com/awnumar/memcall v0.1.2/go.mod h1:S911igBPR9CThzd/hYQQmTc9SWNu3ZHIlCGaWsWsoJo= +github.com/awnumar/memguard v0.22.3 h1:b4sgUXtbUjhrGELPbuC62wU+BsPQy+8lkWed9Z+pj0Y= +github.com/awnumar/memguard v0.22.3/go.mod h1:mmGunnffnLHlxE5rRgQc3j+uwPZ27eYb61ccr8Clz2Y= +github.com/aws/aws-sdk-go-v2 v1.21.0 h1:gMT0IW+03wtYJhRqTVYn0wLzwdnK9sRMcxmtfGzRdJc= +github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= +github.com/aws/aws-sdk-go-v2/config v1.18.42 h1:28jHROB27xZwU0CB88giDSjz7M1Sba3olb5JBGwina8= +github.com/aws/aws-sdk-go-v2/config v1.18.42/go.mod h1:4AZM3nMMxwlG+eZlxvBKqwVbkDLlnN2a4UGTL6HjaZI= +github.com/aws/aws-sdk-go-v2/credentials v1.13.40 h1:s8yOkDh+5b1jUDhMBtngF6zKWLDs84chUk2Vk0c38Og= +github.com/aws/aws-sdk-go-v2/credentials v1.13.40/go.mod h1:VtEHVAAqDWASwdOqj/1huyT6uHbs5s8FUHfDQdky/Rs= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 h1:uDZJF1hu0EVT/4bogChk8DyjSF6fof6uL/0Y26Ma7Fg= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11/go.mod h1:TEPP4tENqBGO99KwVpV9MlOX4NSrSLP8u3KRy2CDwA8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 h1:22dGT7PneFMx4+b3pz7lMTRyN8ZKH7M2cW4GP9yUS2g= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 h1:SijA0mgjV8E+8G45ltVHs0fvKpTj8xmZJ3VwhGKtUSI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.43 h1:g+qlObJH4Kn4n21g69DjspU0hKTjWtq7naZ9OLCv0ew= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.43/go.mod h1:rzfdUlfA+jdgLDmPKjd3Chq9V7LVLYo1Nz++Wb91aRo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 h1:CdzPW9kKitgIiLV1+MHobfR5Xg25iYnyzWZhyQuSlDI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35/go.mod h1:QGF2Rs33W5MaN9gYdEQOBBFPLwTZkEhRwI33f7KIG0o= +github.com/aws/aws-sdk-go-v2/service/sso v1.14.1 h1:YkNzx1RLS0F5qdf9v1Q8Cuv9NXCL2TkosOxhzlUPV64= +github.com/aws/aws-sdk-go-v2/service/sso v1.14.1/go.mod h1:fIAwKQKBFu90pBxx07BFOMJLpRUGu8VOzLJakeY+0K4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.1 h1:8lKOidPkmSmfUtiTgtdXWgaKItCZ/g75/jEk6Ql6GsA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.1/go.mod h1:yygr8ACQRY2PrEcy3xsUI357stq2AxnFM6DIsR9lij4= +github.com/aws/aws-sdk-go-v2/service/sts v1.22.0 h1:s4bioTgjSFRwOoyEFzAVCmFmoowBgjTR8gkrF/sQ4wk= 
+github.com/aws/aws-sdk-go-v2/service/sts v1.22.0/go.mod h1:VC7JDqsqiwXukYEDjoHh9U0fOJtNWh04FPQz4ct4GGU= +github.com/aws/smithy-go v1.14.2 h1:MJU9hqBGbvWZdApzpvoF2WAIJDbtjK2NDJSiJP7HblQ= +github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/benbjohnson/clock v1.3.3 h1:g+rSsSaAzhHJYcIQE78hJ3AhyjjtQvleKDjlhdBnIhc= +github.com/benbjohnson/clock v1.3.3/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blues/jsonata-go v1.5.4 h1:XCsXaVVMrt4lcpKeJw6mNJHqQpWU751cnHdCFUq3xd8= +github.com/blues/jsonata-go v1.5.4/go.mod h1:uns2jymDrnI7y+UFYCqsRTEiAH22GyHnNXrkupAVFWI= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudevents/sdk-go/v2 v2.14.0 h1:Nrob4FwVgi5L4tV9lhjzZcjYqFVyJzsA56CwPaPfv6s= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/compose-spec/compose-go v1.16.0 h1:HYk4uYWXgArHh6NG+WE4yGYayOXw+hjqJ+eJxpjWWjk= +github.com/compose-spec/compose-go v1.16.0/go.mod h1:3yngGBGfls6FHGQsg4B1z6gz8ej9SOvmAJtxCwgbcnc= +github.com/containerd/containerd v1.7.3 h1:cKwYKkP1eTj54bP3wCdXXBymmKRQMrWjkLSWZZJDa8o= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f 
h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= +github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= +github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= +github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fxamacker/cbor v1.5.1 h1:XjQWBgdmQyqimslUh5r4tUGmoqzHmBFQOImkWGi2awg= +github.com/gin-contrib/cors v1.3.1 h1:doAsuITavI4IOcd0Y19U4B+O0dNWihRyX//nn4sEmgA= +github.com/gin-contrib/cors v1.3.1/go.mod h1:jjEJ4268OPZUcU7k9Pm653S7lXUGcqMADzFA61xsmDk= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= +github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= +github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= +github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da 
h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/cel-go v0.14.1-0.20230424164844-d39523c445fc h1:jd+stC3Fqf9kaqgCLOdm4Da/AN3txPTlmLB6tStXAcU= +github.com/google/cel-go v0.14.1-0.20230424164844-d39523c445fc/go.mod h1:YzWEoI07MC/a/wj9in8GeVatqfypkldgBlwXh9bCwqY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod 
h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/guoyk93/rg v1.0.1 h1:Rnca+1JYfuGqPRMIQkuxAoZhhmrPpMFyS5XwLz0U0ds= +github.com/guoyk93/rg v1.0.1/go.mod h1:tLaoLk8bo/PQld1xGvJvAfCl3K0Nckzh0gsnykFoQYg= +github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/influxdata/line-protocol/v2 v2.2.1 h1:EAPkqJ9Km4uAxtMRgUubJyqAr6zgWM0dznKMLRauQRE= +github.com/influxdata/telegraf v1.28.2 h1:psX+oyjo9Ay9Pb/CEDxW9bWoxIWfL3GnzeFYvFJO1Y0= +github.com/influxdata/telegraf v1.28.2/go.mod h1:X3UcHLZQDVu8Y5GO4rOY1pr7TrwPLuW5OBf2RGBFjTE= +github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY= +github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.3.1 h1:Fcr8QJ1ZeLi5zsPZqQeUZhNhxfkkKBOgJuYkJHoBOtU= +github.com/jackc/pgx/v5 v5.3.1/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8= +github.com/jeremywohl/flatten/v2 v2.0.0-20211013061545-07e4a09fb8e4 h1:eA9wi6ZzpIRobvXkn/S2Lyw1hr2pc71zxzOPl7Xjs4w= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmespath/go-jmespath v0.4.0/go.mod 
h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= +github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8= +github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is= +github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible h1:Y6sqxHMyB1D2YSzWkLibYKgg+SwmyFU9dF2hn6MdTj4= +github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible/go.mod h1:ZQnN8lSECaebrkQytbHj4xNgtg8CR7RYXnPok8e0EHA= +github.com/lestrrat-go/strftime v1.0.6 h1:CFGsDEt1pOpFNU+TJB0nhz9jl+K0hZSLE205AhTIGQQ= +github.com/lestrrat-go/strftime v1.0.6/go.mod h1:f7jQKgV5nnJpYgdEasS+/y7EsTb8ykN2z68n3TtcTaw= +github.com/linkedin/goavro/v2 v2.12.0 h1:rIQQSj8jdAUlKQh6DttK8wCRv4t4QO09g1C4aBWXslg= +github.com/lufia/plan9stats 
v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= +github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.3/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= +github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= +github.com/naoina/go-stringutil v0.1.0/go.mod 
h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= +github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= +github.com/nats-io/jwt/v2 v2.4.1 h1:Y35W1dgbbz2SQUYDPCaclXcuqleVmpbRa7646Jf2EX4= +github.com/nats-io/jwt/v2 v2.4.1/go.mod h1:24BeQtRwxRV8ruvC4CojXlx/WQ/VjuwlYiH+vu/+ibI= +github.com/nats-io/nats-server/v2 v2.9.9 h1:bmj0RhvHOc8+z5/RuhI38GqPwtkFAHQuU3e99FVA/TI= +github.com/nats-io/nats-server/v2 v2.9.9/go.mod h1:AB6hAnGZDlYfqb7CTAm66ZKMZy9DpfierY1/PbpvI2g= +github.com/nats-io/nats.go v1.30.2 h1:aloM0TGpPorZKQhbAkdCzYDj+ZmsJDyeo3Gkbr72NuY= +github.com/nats-io/nats.go v1.30.2/go.mod h1:dcfhUgmQNN4GJEfIb2f9R7Fow+gzBF4emzDHrVBd5qM= +github.com/nats-io/nkeys v0.4.5 h1:Zdz2BUlFm4fJlierwvGK+yl20IAKUm7eV6AAZXEhkPk= +github.com/nats-io/nkeys v0.4.5/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= +github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= +github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus-community/pro-bing v0.3.0 h1:SFT6gHqXwbItEDJhTkzPWVqU6CLEtqEfNAPp47RUON4= +github.com/prometheus-community/pro-bing v0.3.0/go.mod h1:p9dLb9zdmv+eLxWfCT6jESWuDrS+YzpPkQBgysQF8a0= 
+github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/prometheus v0.46.0 h1:9JSdXnsuT6YsbODEhSQMwxNkGwPExfmzqG73vCMk/Kw= +github.com/prometheus/prometheus v0.46.0/go.mod h1:10L5IJE5CEsjee1FnOcVswYXlPIscDWWt3IJ2UDYrz4= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ= +github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/shirou/gopsutil/v3 v3.23.8 h1:xnATPiybo6GgdRoC4YoGnxXZFRc3dqQTGi73oLvvBrE= +github.com/shirou/gopsutil/v3 v3.23.8/go.mod h1:7hmCaBn+2ZwaZOr6jmPBZDfawwMGuo1id3C6aM8EDqQ= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= +github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= +github.com/spf13/viper v1.17.0/go.mod 
h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= +github.com/srebhan/cborquery v0.0.0-20230626165538-38be85b82316 h1:HVv8JjpX24FuI59aET1uInn0ItuEiyj8CZMuR9Uw+lE= +github.com/srebhan/protobufquery v0.0.0-20230803132024-ae4c0d878e55 h1:ksmbrLbJAm+8yxB7fJ245usD0b1v9JHBJrWF+WqGyjs= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/testcontainers/testcontainers-go v0.22.0 h1:hOK4NzNu82VZcKEB1aP9LO1xYssVFMvlfeuDW9JMmV0= +github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= +github.com/yuin/goldmark v1.1.25/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys 
v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod 
h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.143.0 h1:o8cekTkqhywkbZT6p1UHJPZ9+9uuCAJs/KYomxZB8fA= +google.golang.org/api v0.143.0/go.mod h1:FoX9DO9hT7DLNn97OuoZAGSDuNAXdJRuGK98rSUgurk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto 
v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI= +google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf 
v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/postgres v1.5.2 h1:ytTDxxEv+MplXOfFe3Lzm7SjG09fcdb3Z/c056DTBx0= +gorm.io/driver/postgres v1.5.2/go.mod h1:fmpX0m2I1PKuR7mKZiEluwrP3hbs+ps7JIGMUBpCgl8= +gorm.io/driver/sqlite v1.1.3 h1:BYfdVuZB5He/u9dt4qDpZqiqDJ6KhPqs5QUqsr/Eeuc= +gorm.io/driver/sqlite v1.1.3/go.mod h1:AKDgRWk8lcSQSw+9kxCJnX/yySj8G3rdwYlU57cB45c= +gorm.io/gorm v1.20.1/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= +gorm.io/gorm v1.23.0/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/gorm v1.25.0 h1:+KtYtb2roDz14EQe4bla8CbQlmb9dN3VejSai3lprfU= +gorm.io/gorm v1.25.0/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= +gorm.io/plugin/soft_delete v1.1.0 h1:LcE4L+GD29RkkMLxMYHpT4wQCJ/9945FsdU/mHGaDuE= +gorm.io/plugin/soft_delete v1.1.0/go.mod h1:Zv7vQctOJTGOsJ/bWgrN1n3od0GBAZgnLjEx+cApLGk= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= +howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= +k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= +k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= +k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/agent/internal/command/command.go b/agent/internal/command/command.go new file mode 100644 index 0000000..9675a40 --- /dev/null +++ b/agent/internal/command/command.go @@ -0,0 +1,30 @@ +package command + +import ( + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/func_make" +) + +var ( + commandMap = map[string]interface{}{ + "demo": demo, + } + funcMake = func_make.New() +) + +func Register() { + err := funcMake.Registers(commandMap) + if err != nil { + panic("failed to register console command: " + err.Error()) + } +} + +func Run(funcName string) { + Register() + _, err := funcMake.Call(funcName) + if err != nil { + fmt.Printf("execution failed, error cause: %v \n", err.Error()) + return + } + fmt.Printf("complete! 
\n") +} diff --git a/agent/internal/command/demo.go b/agent/internal/command/demo.go new file mode 100644 index 0000000..05b1a95 --- /dev/null +++ b/agent/internal/command/demo.go @@ -0,0 +1,7 @@ +package command + +import "fmt" + +func demo() { + fmt.Println("hello console!") +} diff --git a/agent/internal/controller/base.go b/agent/internal/controller/base.go new file mode 100644 index 0000000..3663e9f --- /dev/null +++ b/agent/internal/controller/base.go @@ -0,0 +1,54 @@ +package controller + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/errors" + r "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/response" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + "github.com/gin-gonic/gin" + "go.uber.org/zap" +) + +// Success 业务成功响应 +func Success(c *gin.Context, data ...any) { + response := r.Resp() + if data != nil { + response.WithDataSuccess(c, data[0]) + return + } + response.Success(c) +} + +// FailCode 业务失败响应 +func FailCode(c *gin.Context, code int, err error, data ...any) { + response := r.Resp() + if err != nil { + log.Error("异常: %s", err.Error()) + } + if data != nil { + response.WithData(data[0]).FailCode(c, code) + return + } + response.FailCode(c, code) +} + +// Fail 业务失败响应 +func Fail(c *gin.Context, code int, message string, data ...any) { + response := r.Resp() + if data != nil { + response.WithData(data[0]).FailCode(c, code, message) + return + } + response.FailCode(c, code, message) +} + +// Err 判断错误类型是自定义类型则自动返回错误中携带的code和message,否则返回服务器错误 +func Err(c *gin.Context, e error) { + businessError, err := errors.AsBusinessError(e) + if err != nil { + log.Logger.Warn("Unknown error:", zap.Any("Error reason:", err)) + FailCode(c, errors.ServerError, err) + return + } + + Fail(c, businessError.GetCode(), businessError.GetMessage()) +} diff --git a/agent/internal/controller/v1/agentController.go b/agent/internal/controller/v1/agentController.go new file mode 100644 index 0000000..e8891de --- /dev/null +++ b/agent/internal/controller/v1/agentController.go @@ -0,0 +1,69 @@ +package v1 + +import ( + "encoding/json" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/agent" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/controller" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/errors" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/request" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/service/component" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/service/load_resource_info" + "github.com/gin-gonic/gin" + "io" +) + +func AssetInformationEntry(c *gin.Context) { + body := request.GetBody(c) + var rinfo = agent.AgentConfig{} + err := json.Unmarshal(body, &rinfo) + if err != nil { + controller.FailCode(c, errors.ServerError, err, "更新资产信息失败!") + return + } + + agent.Agent = &rinfo + load_resource_info.LoadAgentInfo(rinfo) + + controller.Success(c, "配置初始化完成") +} + +func GetComponentInfo(c *gin.Context) { + info, err := component.GetComInfo() + if err != nil { + controller.FailCode(c, errors.ServerError, err, "获取组件详情失败!") + return + } + controller.Success(c, info) +} + +func UpdateTelegrafConfig(c *gin.Context) { + + _, fileHeader, err := c.Request.FormFile("telegraf-config") + if err != nil { + controller.FailCode(c, errors.InvalidParameter, err, "请检查参数(body)是否符合要求!") + return + } + + src, err := fileHeader.Open() + if err != nil { + controller.FailCode(c, errors.InvalidParameter, err, "请检查参数(body)是否符合要求!") + return + } + defer src.Close() + + var out []byte + out, _ = 
io.ReadAll(src) + + component.UpdateTelegrafConfig(out) + + if err != nil { + controller.FailCode(c, errors.ServerError, err, "请检查参数(body)是否符合要求!") + return + } + controller.Success(c, "完成配置文件更新!") +} + +func RecoverConfig(c *gin.Context) { + component.RecoverConfig() + controller.Success(c, "完成配置文件恢复!") +} diff --git a/agent/internal/controller/v1/demo.go b/agent/internal/controller/v1/demo.go new file mode 100644 index 0000000..ba4b21c --- /dev/null +++ b/agent/internal/controller/v1/demo.go @@ -0,0 +1,16 @@ +package v1 + +import ( + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/controller" + "github.com/gin-gonic/gin" +) + +func HelloWorld(c *gin.Context) { + str, ok := c.GetQuery("name") + if !ok { + str = "gin-layout" + } + + controller.Success(c, fmt.Sprintf("hello %s", str)) +} diff --git a/agent/internal/controller/v1/downloadAgent.go b/agent/internal/controller/v1/downloadAgent.go new file mode 100644 index 0000000..e6d5d53 --- /dev/null +++ b/agent/internal/controller/v1/downloadAgent.go @@ -0,0 +1,21 @@ +package v1 + +import ( + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/definition" + "github.com/gin-gonic/gin" + "net/http" + "path/filepath" +) + +func DownloadAgent(c *gin.Context) { + filename := c.Query("filename") + + c.Writer.WriteHeader(http.StatusOK) + c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=%s", filename)) + c.Header("Content-Type", "application/octet-stream") + + versionedAgent := filepath.Join(definition.RunDirectory, "../upgrade", filename) + + c.File(versionedAgent) +} diff --git a/agent/internal/controller/v1/logController.go b/agent/internal/controller/v1/logController.go new file mode 100644 index 0000000..dc9b997 --- /dev/null +++ b/agent/internal/controller/v1/logController.go @@ -0,0 +1,54 @@ +package v1 + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/controller" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/errors" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/service/log_service" + "github.com/gin-gonic/gin" + "strconv" +) + +func GetAgentLog(c *gin.Context) { + lineNum := c.Query("lines") + lineUnit, err2 := strconv.ParseUint(lineNum, 10, 64) + if err2 != nil { + lineUnit = 100 + } + + info, err := log_service.GetAgentLog(lineUnit) + if err != nil { + controller.FailCode(c, errors.ServerError, err, "读取日志失败!") + return + } + controller.Success(c, info) +} + +func GetAgentUpgradeLog(c *gin.Context) { + lineNum := c.Query("lines") + lineUnit, err2 := strconv.ParseUint(lineNum, 10, 64) + if err2 != nil { + lineUnit = 100 + } + + info, err := log_service.GetAgentUpgradeLog(lineUnit) + if err != nil { + controller.FailCode(c, errors.ServerError, err, "读取日志失败!") + return + } + controller.Success(c, info) +} + +func GetTelegrafLog(c *gin.Context) { + lineNum := c.Query("lines") + lineUnit, err2 := strconv.ParseUint(lineNum, 10, 64) + if err2 != nil { + lineUnit = 100 + } + + info, err := log_service.GetTelegrafLog(lineUnit) + if err != nil { + controller.FailCode(c, errors.ServerError, err, "读取日志失败!") + return + } + controller.Success(c, info) +} diff --git a/agent/internal/controller/v1/shutdown.go b/agent/internal/controller/v1/shutdown.go new file mode 100644 index 0000000..02626ff --- /dev/null +++ b/agent/internal/controller/v1/shutdown.go @@ -0,0 +1,13 @@ +package v1 + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/controller" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils" + 
"github.com/gin-gonic/gin" + "os" +) + +func Shutdown(c *gin.Context) { + utils.ChUserSig <- os.Interrupt + controller.Success(c, "关闭命令已发送!") +} diff --git a/agent/internal/middleware/app_context.go b/agent/internal/middleware/app_context.go new file mode 100644 index 0000000..5de3602 --- /dev/null +++ b/agent/internal/middleware/app_context.go @@ -0,0 +1,27 @@ +package middleware + +import ( + "github.com/gin-gonic/gin" +) + +const ( + CLUSTERID = "clusterId" + NAMESPACE = "namespace" +) + +func AppContextHandler() gin.HandlerFunc { + return func(c *gin.Context) { + ns := c.Request.Header.Get("namespace") + if len(ns) != 0 { + c.Set(NAMESPACE, ns) + } + + clusterId := c.Param("cluster") + if len(clusterId) != 0 { + c.Set(CLUSTERID, clusterId) + } + + c.Next() + } + +} diff --git a/agent/internal/middleware/authentication.go b/agent/internal/middleware/authentication.go new file mode 100644 index 0000000..ddd8b0d --- /dev/null +++ b/agent/internal/middleware/authentication.go @@ -0,0 +1,81 @@ +package middleware + +import ( + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/authen" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/errors" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/response" + "github.com/gin-gonic/gin" + "github.com/golang/glog" + "regexp" +) + +const ( + LoginUserKey = "login-user" +) + +func AuthenticationHandler() gin.HandlerFunc { + return func(c *gin.Context) { + match, _ := regexp.MatchString("/healthz", c.Request.RequestURI) + if match { + c.Next() + return + } + + rawToken := c.Request.Header.Get("Authorization") + if rawToken == "" { + glog.Warning("unauthorized access, token not specified") + response.Resp().FailCode(c, errors.AuthorizationError, "token should be specified in header with 'Authorization' key") + return + } + var username, encrypted string + ok, bearerToken := authen.JWTAuthorizer.IsBearerToken(rawToken) + if ok { + loginInfo, err := authen.JWTAuthorizer.ValidateToken(bearerToken) + //marshal, _ := json.Marshal(loginInfo) + //fmt.Print(string(marshal)) + + if err != nil { + if authen.JWTAuthorizer.IsTokenExpired(err) { + glog.Warning("unauthorized access, bearer token expired") + response.Resp().FailCode(c, errors.AuthorizationError, "bearer token expired") + return + } + glog.Warningf("validate bearer token failed, %s", err) + response.Resp().FailCode(c, errors.AuthorizationError, fmt.Sprint("validate bearer token failed, %s", err)) + return + } + username = loginInfo.Username + encrypted = loginInfo.Encrypted + } else { + glog.Warningf("validate bearer token failed") + response.Resp().FailCode(c, errors.AuthorizationError, "validate bearer token failed") + + return + } + //u, err := user.FindOneByName(username, model.DBAuth()) + //if err != nil { + // if err == gorm.ErrRecordNotFound { + // glog.Errorf("unauthorized access, user not found, %s", username) + // response.Resp().FailCode(c, errors.AuthorizationError, "user not found") + // + // return + // } + // glog.Errorf("get user from db failed, user %s, %s", username, err) + // response.Resp().FailCode(c, errors.ServerError, fmt.Sprintf("get user from db failed, user %s, %s", username, err)) + // + // return + //} + // + //if encrypted != "" && encrypted != u.EncryptedPassword { + // glog.Warningf("unauthorized access, password mismatch, user %s", username) + // response.Resp().FailCode(c, errors.AuthorizationError, "password mismatch") + // return + //} + + //c.Set(LoginUserKey, u) + + c.Set(LoginUserKey, username+":"+encrypted) + c.Next() + 
} +} diff --git a/agent/internal/middleware/authorization.go b/agent/internal/middleware/authorization.go new file mode 100644 index 0000000..4a0db54 --- /dev/null +++ b/agent/internal/middleware/authorization.go @@ -0,0 +1,26 @@ +package middleware + +import ( + "github.com/gin-gonic/gin" + "github.com/golang/glog" + "regexp" +) + +func AuthorizationHandler() gin.HandlerFunc { + return func(c *gin.Context) { + match, _ := regexp.MatchString("/healthz", c.Request.RequestURI) + if match { + c.Next() + return + } + + _, exist := c.Get(LoginUserKey) + if !exist { + glog.Fatal("Authorization middleware should work together with Authentication middleware") + c.Next() + return + } + + c.Next() + } +} diff --git a/agent/internal/middleware/cors.go b/agent/internal/middleware/cors.go new file mode 100644 index 0000000..43bbab5 --- /dev/null +++ b/agent/internal/middleware/cors.go @@ -0,0 +1,10 @@ +package middleware + +import ( + "github.com/gin-contrib/cors" + "github.com/gin-gonic/gin" +) + +func CorsHandler() gin.HandlerFunc { + return cors.Default() +} diff --git a/agent/internal/middleware/logger.go b/agent/internal/middleware/logger.go new file mode 100644 index 0000000..6ba9604 --- /dev/null +++ b/agent/internal/middleware/logger.go @@ -0,0 +1,53 @@ +package middleware + +import ( + "bytes" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + "github.com/gin-gonic/gin" + "go.uber.org/zap" + "time" +) + +type responseWriter struct { + gin.ResponseWriter + body *bytes.Buffer +} + +func (w responseWriter) Write(b []byte) (int, error) { + w.body.Write(b) + return w.ResponseWriter.Write(b) +} + +func (w responseWriter) WriteString(s string) (int, error) { + w.body.WriteString(s) + return w.ResponseWriter.WriteString(s) +} + +// CustomLogger 接收gin框架默认的日志 +func CustomLogger() gin.HandlerFunc { + return func(c *gin.Context) { + blw := &responseWriter{body: bytes.NewBufferString(""), ResponseWriter: c.Writer} + c.Writer = blw + // 读取body数据 + //body := request.GetBody(c) + c.Next() + + cost := time.Since(c.GetTime("requestStartTime")) + if config.Config.AppEnv != "production" { + path := c.Request.URL.Path + log.Logger.Info(path, + zap.Int("status", c.Writer.Status()), + zap.String("method", c.Request.Method), + zap.String("path", path), + zap.String("query", c.Request.URL.RawQuery), + //zap.Any("body", string(body)), + zap.String("ip", c.ClientIP()), + zap.String("user-agent", c.Request.UserAgent()), + zap.String("errors", c.Errors.ByType(gin.ErrorTypePrivate).String()), + zap.String("cost", cost.String()), + zap.String("response", blw.body.String()), + ) + } + } +} diff --git a/agent/internal/middleware/recovery.go b/agent/internal/middleware/recovery.go new file mode 100644 index 0000000..7006b1e --- /dev/null +++ b/agent/internal/middleware/recovery.go @@ -0,0 +1,40 @@ +package middleware + +import ( + "errors" + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + e "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/errors" + response2 "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/response" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + "github.com/gin-gonic/gin" + "net/http" + "strings" +) + +// CustomRecovery 自定义错误 (panic) 拦截中间件、对可能发生的错误进行拦截、统一记录 +func CustomRecovery() gin.HandlerFunc { + DefaultErrorWriter := &PanicExceptionRecord{} + return gin.RecoveryWithWriter(DefaultErrorWriter, func(c *gin.Context, err interface{}) { + // 这里针对发生的panic等异常进行统一响应即 + // 
这里针对发生的panic等异常进行统一响应即 + errStr := "" + if config.Config.Debug == true { + errStr = fmt.Sprintf("%v", err) + } + response2.Resp().SetHttpCode(http.StatusInternalServerError).FailCode(c, e.ServerError, errStr) + }) +} + +// PanicExceptionRecord panic等异常记录 +type PanicExceptionRecord struct{} + +func (p *PanicExceptionRecord) Write(b []byte) (n int, err error) { + s1 := "An error occurred in the server's internal code:" + var build strings.Builder + build.WriteString(s1) + build.Write(b) + errStr := build.String() + log.Error(errStr) + return len(errStr), errors.New(errStr) +} diff --git a/agent/internal/middleware/requestCost.go b/agent/internal/middleware/requestCost.go new file mode 100644 index 0000000..2fb0e1a --- /dev/null +++ b/agent/internal/middleware/requestCost.go @@ -0,0 +1,13 @@ +package middleware + +import ( + "github.com/gin-gonic/gin" + "time" +) + +func RequestCostHandler() gin.HandlerFunc { + return func(c *gin.Context) { + c.Set("requestStartTime", time.Now()) + c.Next() + } +} diff --git a/agent/internal/model/base.go b/agent/internal/model/base.go new file mode 100644 index 0000000..403b839 --- /dev/null +++ b/agent/internal/model/base.go @@ -0,0 +1,40 @@ +package model + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/data" + "gorm.io/gorm" + "gorm.io/plugin/soft_delete" + "time" +) + +type PageData struct { + TotalCount int64 + PageSize int + PageNumber int + Data interface{} +} + +type BaseModel struct { + ID string `gorm:"column:id;not null;primarykey" json:"id"` + CreateTime time.Time `gorm:"column:create_time;type:timestamp;<-:create" json:"createTime"` + UpdateTime time.Time `gorm:"column:update_time;type:timestamp" json:"updateTime"` + CreatorID string `gorm:"column:creator_id;type:varchar(64);not null;<-:create" json:"creatorID"` + Modifier string `gorm:"column:modifier;type:varchar(64);not null;" json:"modifier"` +} + +func (model *BaseModel) DB() *gorm.DB { + return DB() +} + +type ContainsDeleteBaseModel struct { + BaseModel + DeletedAt soft_delete.DeletedAt `gorm:"column:deleted_at;type:int(11) unsigned;not null;default:0;index" json:"-"` +} + +func DB() *gorm.DB { + return data.PostgreSqlDB["data"] +} + +func DBAuth() *gorm.DB { + return data.PostgreSqlDB["auth"] +} diff --git a/agent/internal/model/component/component.go b/agent/internal/model/component/component.go new file mode 100644 index 0000000..ec5a2b7 --- /dev/null +++ b/agent/internal/model/component/component.go @@ -0,0 +1,30 @@ +package component + +import "time" + +// 组件详情 +type Component struct { + COMName string `json:"comName"` // 组件名称,枚举:Telegraf、ProcessExporter + COMPort int64 `json:"comPort"` // 组件端口 + COMType string `json:"comType"` // 组件类型,中文描述既可 + COMVersion string `json:"comVersion"` // 组件版本 + Config Config `json:"config"` // 组件配置(传json字符串),每个组件的信息不同 + LastStarttime time.Time `json:"lastStarttime"` // 最近运行时间, 2023-08-16 00:00:00 + Status string `json:"status"` // 组件状态,0:离线,1:在线,2:故障 +} + +// Config 组件配置(传json字符串),每个组件的信息不同 +// +// # Telegraf配置 +// +// ProcessExporter配置 +type Config struct { + Metrics []string `json:"metrics,omitempty"` + MetricServer []MetricServer `json:"metricServer,omitempty"` // 指标服务器名称 + Tags map[string]string `json:"tags,omitempty"` // 标签,map[string]string类型 +} + +type MetricServer struct { + Address string `json:"address"` // 指标服务器地址 + Name string `json:"name"` // 指标服务器名称 +} diff --git a/agent/internal/model/heartbeat/heartbeat.go b/agent/internal/model/heartbeat/heartbeat.go new file mode 100644 index 0000000..33359bd --- /dev/null +++ 
b/agent/internal/model/heartbeat/heartbeat.go @@ -0,0 +1,28 @@ +package heartbeat + +import "time" + +// 1.返回应用一体机开机后运行时间和开始时间 +// 2.返回CPU、内存、网卡等配置信息、型号信息、操作系统信息,硬盘信息等 + +// HeartBeat 一体机-心跳检测信息 +type HeartBeat struct { + SerialNo string `json:"serialNo"` // 序列号 + AgentVersion string `json:"agentVersion"` // 小助手版本 + IPAddress string `json:"ipAddress"` // ip地址,多个逗号分割 + MACAddress string `json:"macAddress"` // mac地址 + + Architecture string `json:"archType"` // architecture + KernelVersion string `json:"kernelVersion"` // kernel_version + OS string `json:"osInfo"` // os + OSType string `json:"osType"` // os + + LastStarttime time.Time `json:"lastStarttime"` // 最近开机时间, 2023-08-16 00:00:00 - 2023-08-16 23:59:59 + COMInfo []COMInfo `json:"comInfo"` // 组件信息 +} + +type COMInfo struct { + LastStarttime time.Time `json:"lastStarttime"` // 最近运行时间, 2023-08-16 00:00:00 + Name string `json:"name"` // 组件名称 + Status string `json:"status"` // 组件状态,0:离线,1:在线,2:故障 +} diff --git a/agent/internal/model/heartbeat/heartbeat_response.go b/agent/internal/model/heartbeat/heartbeat_response.go new file mode 100644 index 0000000..3bbf672 --- /dev/null +++ b/agent/internal/model/heartbeat/heartbeat_response.go @@ -0,0 +1,7 @@ +package heartbeat + +type HeartbeatResponse struct { + ID int `gorm:"column:user_id"` + Name string `gorm:"column:login_name"` + EncryptedPassword string `gorm:"column:login_pass"` +} diff --git a/agent/internal/model/nats_msg_model/local_upgrade_cmd.go b/agent/internal/model/nats_msg_model/local_upgrade_cmd.go new file mode 100644 index 0000000..1d54c39 --- /dev/null +++ b/agent/internal/model/nats_msg_model/local_upgrade_cmd.go @@ -0,0 +1,29 @@ +package nats_msg_model + +import "encoding/json" + +func UnmarshalLocalUpgradeCMD(data []byte) (LocalUpgradeCMD, error) { + var r LocalUpgradeCMD + err := json.Unmarshal(data, &r) + return r, err +} + +func (r *LocalUpgradeCMD) Marshal() ([]byte, error) { + return json.Marshal(r) +} + +type LocalUpgradeCMD struct { + AgentId string `json:"agentId"` // 需要关联升级记录 + // 备用 + DownUrl string `json:"downUrl"` + Filename string `json:"filename"` + Version string `json:"version"` + Command string `json:"command"` + + CompressionType string `json:"compressionType"` // tar.gz or zip + IP string `json:"ip"` + Port uint16 `json:"port"` + OsType string `json:"osType"` // 字典:Windows\Ubuntu\CentOS\KylinOS + ArchType string `json:"archType"` // 字典: X86\ARM + //DeviceType string `json:"deviceType"` // 设备类型;0:为全部,1:应用一体机,2:调度一体机,3:纳管一体机 +} diff --git a/agent/internal/model/nats_msg_model/msg_model.go b/agent/internal/model/nats_msg_model/msg_model.go new file mode 100644 index 0000000..879f45b --- /dev/null +++ b/agent/internal/model/nats_msg_model/msg_model.go @@ -0,0 +1,20 @@ +package nats_msg_model + +import "encoding/json" + +func UnmarshalMsgModel(data []byte) (MsgModel, error) { + var r MsgModel + err := json.Unmarshal(data, &r) + return r, err +} + +func (r *MsgModel) Marshal() ([]byte, error) { + return json.Marshal(r) +} + +type MsgModel struct { + Body []byte `json:"body"` // 函数入参,与函数配套。JSON + Func string `json:"func"` // 功能名称,一般代表一个函数接口。命名规范为:cfn.{agent or schedule}.{包名}.函数名例如:cfn.agent.component.GetComInfo + Rid string `json:"rid"` // 资产ID列表,可选。可用于部分广播 + Version string `json:"version"` // 函数版本 +} diff --git a/agent/internal/model/nats_msg_model/upgrade_cmd.go b/agent/internal/model/nats_msg_model/upgrade_cmd.go new file mode 100644 index 0000000..582b33f --- /dev/null +++ b/agent/internal/model/nats_msg_model/upgrade_cmd.go @@ -0,0 +1,25 @@ +package 
nats_msg_model + +import "encoding/json" + +func UnmarshalUpgradeCMD(data []byte) (UpgradeCMD, error) { + var r UpgradeCMD + err := json.Unmarshal(data, &r) + return r, err +} + +func (r *UpgradeCMD) Marshal() ([]byte, error) { + return json.Marshal(r) +} + +type UpgradeCMD struct { + AgentId string `json:"agentId"` + DownUrl string `json:"downUrl"` + BucketName string `json:"bucketName"` + Filename string `json:"filename"` + CompressionType string `json:"compressionType"` + Command string `json:"command"` + Version string `json:"version"` + OsType string `json:"osType"` //字典:Windows\Ubuntu\CentOS\KylinOS + ArchType string `json:"archType"` //字典: X86\ARM +} diff --git a/agent/internal/model/nats_msg_model/upgrade_plan.go b/agent/internal/model/nats_msg_model/upgrade_plan.go new file mode 100644 index 0000000..3dc0232 --- /dev/null +++ b/agent/internal/model/nats_msg_model/upgrade_plan.go @@ -0,0 +1,25 @@ +package nats_msg_model + +import "encoding/json" + +func UnmarshalUpgradePlan(data []byte) (UpgradePlan, error) { + var r UpgradePlan + err := json.Unmarshal(data, &r) + return r, err +} + +func (r *UpgradePlan) Marshal() ([]byte, error) { + return json.Marshal(r) +} + +type UpgradePlan struct { + UpgradeTime string `json:"upgradeTime" yaml:"upgradeTime"` //指定升级时间;指定时间升级时有值 + UpgradeRule string `json:"upgradeRule" yaml:"upgradeRule"` //升级策略;0:立刻升级,1:指定时间升级 + DeviceType string `json:"deviceType" yaml:"deviceType"` //设备类型;0:为全部,1:应用一体机,2:调度一体机,3:纳管一体机 + Status string `json:"status" yaml:"status"` //字典:create:创建此条升级规则,delete:删除升级规则 + AgentId string `json:"agentId" yaml:"agentId"` // 关联升级记录,反馈升级结果时用 + Option string `json:"option" yaml:"option"` // reboot|upgrade|rollback # 动作 + Version string `json:"version" yaml:"version"` + CurrentVerson string `json:"currentVerson" yaml:"currentVerson"` + RID string `json:"rid" yaml:"rid" mapstructure:"rid"` +} diff --git a/agent/internal/model/nats_msg_model/upgrade_result.go b/agent/internal/model/nats_msg_model/upgrade_result.go new file mode 100644 index 0000000..d9a3177 --- /dev/null +++ b/agent/internal/model/nats_msg_model/upgrade_result.go @@ -0,0 +1,74 @@ +package nats_msg_model + +import ( + "encoding/json" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/agent" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/service/heartbeat_service" + "time" +) + +func UnmarshalUpgradeResult(data []byte) (UpgradeResult, error) { + var r UpgradeResult + err := json.Unmarshal(data, &r) + return r, err +} + +func (r *UpgradeResult) Marshal() ([]byte, error) { + return json.Marshal(r) +} + +// 设备升级记录 +type UpgradeResult struct { + SerialNo string `json:"serialNo"` // 序列号 + + AgentID string `json:"agentId"` // 小助手版本ID + AgentVersion string `json:"agentVersion"` // 升级版本 + OldVersion string `json:"oldVersion"` // 旧版本 + + UpgradeTime string `json:"upgradeTime"` // 升级时间,2016-06-30T16:09:51.692226358+08:00 + UpgradeResult string `json:"upgradeResult"` // 升级结果 + UpgradeStatus string `json:"upgradeStatus"` // 状态,0:失败,1:成功 + Option string `json:"option"` +} + +func NewUpgradeResult(upgradeCMD UpgradeCMD, upgradeResult, UpgradeStatus string) *UpgradeResult { + result := UpgradeResult{ + SerialNo: agent.Agent.RID, + AgentID: upgradeCMD.AgentId, + AgentVersion: upgradeCMD.Version, + OldVersion: heartbeat_service.AgentState.AgentVersion, + UpgradeTime: time.Now().String(), + UpgradeStatus: UpgradeStatus, + UpgradeResult: upgradeResult, + Option: "upgrade", + } + return &result +} + +func NewLocalUpgradeResult(upgradeCMD LocalUpgradeCMD, upgradeResult, 
UpgradeStatus string) *UpgradeResult { + result := UpgradeResult{ + SerialNo: agent.Agent.RID, + AgentID: upgradeCMD.AgentId, + AgentVersion: upgradeCMD.Version, + OldVersion: heartbeat_service.AgentState.AgentVersion, + UpgradeTime: time.Now().String(), + UpgradeStatus: UpgradeStatus, + UpgradeResult: upgradeResult, + Option: "upgrade", + } + return &result +} + +func NewUpgradePlanResult(upgradePlan UpgradePlan, upgradeResult, UpgradeStatus string) *UpgradeResult { + result := UpgradeResult{ + SerialNo: agent.Agent.RID, + AgentID: upgradePlan.AgentId, + AgentVersion: upgradePlan.Version, + OldVersion: heartbeat_service.AgentState.AgentVersion, + UpgradeTime: time.Now().String(), + UpgradeStatus: UpgradeStatus, + UpgradeResult: upgradeResult, + Option: "upgrade", + } + return &result +} diff --git a/agent/internal/model/user/user.go b/agent/internal/model/user/user.go new file mode 100644 index 0000000..0f4da57 --- /dev/null +++ b/agent/internal/model/user/user.go @@ -0,0 +1,30 @@ +package user + +import "gorm.io/gorm" + +type User struct { + ID int `gorm:"column:user_id"` + Name string `gorm:"column:login_name"` + EncryptedPassword string `gorm:"column:login_pass"` +} + +func (User) TableName() string { + return "sys_account" +} + +func (m User) GetUserID() int { + return m.ID +} + +func (m User) GetUserName() string { + return m.Name +} + +func FindOneByName(name string, db *gorm.DB) (user *User, err error) { + var u User + if err = db.Where("login_name = ?", name).First(&u).Error; err != nil { + return + } + user = &u + return +} diff --git a/agent/internal/nats_service/cfn_to_agent_broadcast/cfn_to_agent_broadcast.go b/agent/internal/nats_service/cfn_to_agent_broadcast/cfn_to_agent_broadcast.go new file mode 100644 index 0000000..a2f1607 --- /dev/null +++ b/agent/internal/nats_service/cfn_to_agent_broadcast/cfn_to_agent_broadcast.go @@ -0,0 +1,76 @@ +package cfn_to_agent_broadcast + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/agent" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/model/nats_msg_model" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/definition" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/handle" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + natsClient "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/nats-client" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils" + "github.com/nats-io/nats.go" + "time" +) + +var ( + Role = agent.Agent.Role +) + +// 小助手升级包下载命令接收接口 +// 升级计划下发 + +func Start() { + log.Infof("启动nats订阅服务: 升级助手命令广播") + + // Connect Options. + opts := []nats.Option{nats.Name("Agent CMD Subscriber")} + opts = setupConnOptions(opts) + opts = append(opts, nats.UserInfo(natsClient.NatsConfig.User, natsClient.NatsConfig.Password)) + + // Connect to NATS + nc, err := nats.Connect(natsClient.NatsConfig.Url, opts...) 
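+ // A failed connect is only logged below; reconnect behaviour (1s between attempts, up to 10 minutes in total) is configured in setupConnOptions.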
+ if err != nil { + log.Error(err) + } + + nc.Subscribe(definition.ToAgentBroadcastSubject, func(msg *nats.Msg) { + model, err := nats_msg_model.UnmarshalMsgModel(msg.Data) + if err != nil { + log.Errorf("解析消息体失败:%s", err) + } + + log.Infof("接收到广播命令消息, Func:%s", model.Func) + log.Infof("接收到广播命令消息, Body:%s", model.Body) + log.Infof("接收到广播命令消息, Version:%s", model.Version) + log.Infof("接收到广播命令消息, Rid:%s", model.Rid) + + go handle.HandleMsg(msg, model) + + }) + nc.Flush() + + if err := nc.LastError(); err != nil { + log.Error(err) + } + + // holdon + <-utils.StopNats +} + +func setupConnOptions(opts []nats.Option) []nats.Option { + totalWait := 10 * time.Minute + reconnectDelay := time.Second + + opts = append(opts, nats.ReconnectWait(reconnectDelay)) + opts = append(opts, nats.MaxReconnects(int(totalWait/reconnectDelay))) + opts = append(opts, nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { + log.Infof("Disconnected due to:%s, will attempt reconnects for %.0fm", err, totalWait.Minutes()) + })) + opts = append(opts, nats.ReconnectHandler(func(nc *nats.Conn) { + log.Infof("Reconnected [%s]", nc.ConnectedUrl()) + })) + opts = append(opts, nats.ClosedHandler(func(nc *nats.Conn) { + log.Errorf("Exiting: %v", nc.LastError()) + })) + return opts +} diff --git a/agent/internal/nats_service/cfn_to_agent_broadcast_private_enterprise/cfn_to_agent_broadcast_private_enterprise.go b/agent/internal/nats_service/cfn_to_agent_broadcast_private_enterprise/cfn_to_agent_broadcast_private_enterprise.go new file mode 100644 index 0000000..81242c1 --- /dev/null +++ b/agent/internal/nats_service/cfn_to_agent_broadcast_private_enterprise/cfn_to_agent_broadcast_private_enterprise.go @@ -0,0 +1,67 @@ +package cfn_to_agent_broadcast_private_enterprise + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/model/nats_msg_model" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/definition" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/handle" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + natsClient "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/nats-client" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils" + "github.com/nats-io/nats.go" + "time" +) + +func Start() { + log.Infof("启动nats订阅服务: 企业私有广播") + // Connect Options. + opts := []nats.Option{nats.Name("Enterprise Private Broadcast Subscriber")} + opts = setupConnOptions(opts) + opts = append(opts, nats.UserInfo(natsClient.NatsConfig.User, natsClient.NatsConfig.Password)) + + // Connect to NATS + nc, err := nats.Connect(natsClient.NatsConfig.Url, opts...) 
+ if err != nil { + log.Error(err) + } + + nc.Subscribe(definition.ToAgentBroadcastPrivateEnterpriseSubject, func(msg *nats.Msg) { + model, err := nats_msg_model.UnmarshalMsgModel(msg.Data) + if err != nil { + log.Errorf("解析消息体失败:%s", err) + } + + log.Infof("接收到企业内私有广播命令消息, Func:%s", model.Func) + log.Infof("接收到企业内私有广播命令消息, Body:%s", model.Body) + log.Infof("接收到企业内私有广播命令消息, Version:%s", model.Version) + log.Infof("接收到企业内私有广播命令消息, Rid:%s", model.Rid) + + go handle.HandleMsg(msg, model) + + }) + nc.Flush() + + if err := nc.LastError(); err != nil { + log.Error(err) + } + + // holdon + <-utils.StopNats +} + +func setupConnOptions(opts []nats.Option) []nats.Option { + totalWait := 10 * time.Minute + reconnectDelay := time.Second + + opts = append(opts, nats.ReconnectWait(reconnectDelay)) + opts = append(opts, nats.MaxReconnects(int(totalWait/reconnectDelay))) + opts = append(opts, nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { + log.Infof("Disconnected due to:%s, will attempt reconnects for %.0fm", err, totalWait.Minutes()) + })) + opts = append(opts, nats.ReconnectHandler(func(nc *nats.Conn) { + log.Infof("Reconnected [%s]", nc.ConnectedUrl()) + })) + opts = append(opts, nats.ClosedHandler(func(nc *nats.Conn) { + log.Errorf("Exiting: %v", nc.LastError()) + })) + return opts +} diff --git a/agent/internal/nats_service/cfn_to_agent_broadcast_public_enterprice/cfn_to_agent_broadcast_public_enterprice.go b/agent/internal/nats_service/cfn_to_agent_broadcast_public_enterprice/cfn_to_agent_broadcast_public_enterprice.go new file mode 100644 index 0000000..294bdc4 --- /dev/null +++ b/agent/internal/nats_service/cfn_to_agent_broadcast_public_enterprice/cfn_to_agent_broadcast_public_enterprice.go @@ -0,0 +1,67 @@ +package cfn_to_agent_broadcast_public_enterprice + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/model/nats_msg_model" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/definition" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/handle" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + natsClient "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/nats-client" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils" + "github.com/nats-io/nats.go" + "time" +) + +func Start() { + log.Infof("启动nats订阅服务: 企业公共广播") + // Connect Options. + opts := []nats.Option{nats.Name("Enterprise Public Broadcast Subscriber")} + opts = setupConnOptions(opts) + opts = append(opts, nats.UserInfo(natsClient.NatsConfig.User, natsClient.NatsConfig.Password)) + + // Connect to NATS + nc, err := nats.Connect(natsClient.NatsConfig.Url, opts...) 
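+ // ToAgentBroadcastPublicEnterpriseSubject embeds this agent's enterprise ID (see nats_service/definition/subject.go), so the subscription below only receives broadcasts scoped to that enterprise.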
+ if err != nil { + log.Error(err) + } + + nc.Subscribe(definition.ToAgentBroadcastPublicEnterpriseSubject, func(msg *nats.Msg) { + model, err := nats_msg_model.UnmarshalMsgModel(msg.Data) + if err != nil { + log.Errorf("解析消息体失败:%s", err) + } + + log.Infof("接收到企业公共广播命令消息, Func:%s", model.Func) + log.Infof("接收到企业公共广播命令消息, Body:%s", model.Body) + log.Infof("接收到企业公共广播命令消息, Version:%s", model.Version) + log.Infof("接收到企业公共广播命令消息, Rid:%s", model.Rid) + + go handle.HandleMsg(msg, model) + + }) + nc.Flush() + + if err := nc.LastError(); err != nil { + log.Error(err) + } + + // holdon + <-utils.StopNats +} + +func setupConnOptions(opts []nats.Option) []nats.Option { + totalWait := 10 * time.Minute + reconnectDelay := time.Second + + opts = append(opts, nats.ReconnectWait(reconnectDelay)) + opts = append(opts, nats.MaxReconnects(int(totalWait/reconnectDelay))) + opts = append(opts, nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { + log.Infof("Disconnected due to:%s, will attempt reconnects for %.0fm", err, totalWait.Minutes()) + })) + opts = append(opts, nats.ReconnectHandler(func(nc *nats.Conn) { + log.Infof("Reconnected [%s]", nc.ConnectedUrl()) + })) + opts = append(opts, nats.ClosedHandler(func(nc *nats.Conn) { + log.Errorf("Exiting: %v", nc.LastError()) + })) + return opts +} diff --git a/agent/internal/nats_service/cfn_to_agent_unicast/cfn_to_agent_unicast.go b/agent/internal/nats_service/cfn_to_agent_unicast/cfn_to_agent_unicast.go new file mode 100644 index 0000000..5bbca15 --- /dev/null +++ b/agent/internal/nats_service/cfn_to_agent_unicast/cfn_to_agent_unicast.go @@ -0,0 +1,73 @@ +package cfn_to_agent_unicast + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/model/nats_msg_model" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/definition" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/handle" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + natsClient "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/nats-client" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils" + "github.com/nats-io/nats.go" + "time" +) + +// 系统(小助手)日志查询服务 +// 组件详情服务(包含配置内容) +// 更新telegraf配置文件 +// 单个升级(包含升级包地址、升级配置) + +func Start() { + log.Infof("启动nats订阅服务: 单播命令") + + // Connect Options. + opts := []nats.Option{nats.Name("Agent Unicast Subscriber")} + opts = setupConnOptions(opts) + opts = append(opts, nats.UserInfo(natsClient.NatsConfig.User, natsClient.NatsConfig.Password)) + + // Connect to NATS + nc, err := nats.Connect(natsClient.NatsConfig.Url, opts...) 
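+ // ToAgentUnicastSubject is derived from this agent's RID (see nats_service/definition/subject.go), so messages arriving on this subscription are addressed to this agent alone.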
+ if err != nil { + log.Errorf("连接nats失败:%s", err) + } + + nc.Subscribe(definition.ToAgentUnicastSubject, func(msg *nats.Msg) { + model, err := nats_msg_model.UnmarshalMsgModel(msg.Data) + if err != nil { + log.Errorf("解析消息体失败:%s", err) + } + + log.Infof("接收到单播命令消息, Func:%s", model.Func) + log.Infof("接收到单播命令消息, Body:%s", model.Body) + log.Infof("接收到单播命令消息, Version:%s", model.Version) + log.Infof("接收到单播命令消息, Rid:%s", model.Rid) + + go handle.HandleMsg(msg, model) + + }) + nc.Flush() + + if err := nc.LastError(); err != nil { + log.Error(err) + } + + // holdon + <-utils.StopNats +} + +func setupConnOptions(opts []nats.Option) []nats.Option { + totalWait := 10 * time.Minute + reconnectDelay := time.Second + + opts = append(opts, nats.ReconnectWait(reconnectDelay)) + opts = append(opts, nats.MaxReconnects(int(totalWait/reconnectDelay))) + opts = append(opts, nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { + log.Infof("Disconnected due to:%s, will attempt reconnects for %.0fm", err, totalWait.Minutes()) + })) + opts = append(opts, nats.ReconnectHandler(func(nc *nats.Conn) { + log.Infof("Reconnected [%s]", nc.ConnectedUrl()) + })) + opts = append(opts, nats.ClosedHandler(func(nc *nats.Conn) { + log.Errorf("Exiting: %v", nc.LastError()) + })) + return opts +} diff --git a/agent/internal/nats_service/cfn_to_schdule/asset_info/asset_info.go b/agent/internal/nats_service/cfn_to_schdule/asset_info/asset_info.go new file mode 100644 index 0000000..38e579e --- /dev/null +++ b/agent/internal/nats_service/cfn_to_schdule/asset_info/asset_info.go @@ -0,0 +1,98 @@ +package asset_info + +import ( + "encoding/json" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/agent" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/model/nats_msg_model" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/definition" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/service/heartbeat_service" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/service/load_resource_info" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + nats_client "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/nats-client" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils/randomutils" + "time" +) + +func Strat() { + InitAssertInfoScheduler() +} + +func InitAssertInfoScheduler() { + log.Infof("启动资产上报服务") + + randomNumber := randomutils.GetRandomNumber(-3600, 3600) + + // 每天执行一次 + ticker := time.NewTicker((60*60*24 + time.Duration(randomNumber)) * time.Second) // 创建一个定时器 + go func() { + for { + select { + case <-ticker.C: + updataAssertInfoMq() + doAssertInfoMQ() + } + } + }() + + go func() { + updataAssertInfoMq() + // 开机后先上传一次(复用心跳) + doAssertInfoMQ() + }() +} + +func updataAssertInfoMq() { + rid := nats_msg_model.MsgModel{ + Func: definition.GETCOMPANY, + Body: []byte(agent.Agent.RID), + Version: "v1", + Rid: agent.Agent.RID, + } + marshal, _ := rid.Marshal() + msg, err := nats_client.Request(definition.ToScheduleQueueSubject, marshal) + if err != nil { + log.Errorf("获取资产信息失败(企业、资产类型):%s", err) + return + } + + if msg == nil || msg.Data == nil { + log.Errorf("获取资产信息为空!") + return + } + + var resp = definition.Response{} + err = json.Unmarshal(msg.Data, &resp) + + if resp.Code == 200 { + var rinfo = agent.AgentConfig{} + err = json.Unmarshal(resp.Data, &rinfo) + agent.Agent.Enterprise = rinfo.Enterprise + agent.Agent.Type = rinfo.Type + load_resource_info.LoadAgentInfo(*agent.Agent) + } else { + log.Infof("资产信息获取失败:%s", resp.Msg) + return + } + +} + +// 
1.返回应用一体机开机后运行时间和开始时间 +// 2.返回CPU、内存、网卡等配置信息、型号信息、操作系统信息,硬盘信息等 +func doAssertInfoMQ() { + bytes, err := json.Marshal(heartbeat_service.AgentState) + if err != nil { + log.Infof("上报心跳信息序列化失败:%s", err) + return + } + + msg := nats_msg_model.MsgModel{ + Func: definition.ASSETINFO, + Body: bytes, + Version: "v1", + Rid: agent.Agent.RID, + } + + marshal, _ := msg.Marshal() + log.Infof("上报资产信息") + nats_client.Publish(definition.ToScheduleSubject, marshal) +} diff --git a/agent/internal/nats_service/cfn_to_schdule/asset_info/asset_windows.go b/agent/internal/nats_service/cfn_to_schdule/asset_info/asset_windows.go new file mode 100644 index 0000000..6146b5d --- /dev/null +++ b/agent/internal/nats_service/cfn_to_schdule/asset_info/asset_windows.go @@ -0,0 +1,237 @@ +package asset_info + +import ( + "fmt" + "net" + "strings" + "syscall" + "time" + "unsafe" + + "github.com/StackExchange/wmi" +) + +var ( + advapi = syscall.NewLazyDLL("Advapi32.dll") + kernel = syscall.NewLazyDLL("Kernel32.dll") +) + +// 开机时间 +func GetStartTime() string { + GetTickCount := kernel.NewProc("GetTickCount") + r, _, _ := GetTickCount.Call() + if r == 0 { + return "" + } + ms := time.Duration(r * 1000 * 1000) + return ms.String() +} + +// 当前用户名 +func GetUserName() string { + var size uint32 = 128 + var buffer = make([]uint16, size) + user := syscall.StringToUTF16Ptr("USERNAME") + domain := syscall.StringToUTF16Ptr("USERDOMAIN") + r, err := syscall.GetEnvironmentVariable(user, &buffer[0], size) + if err != nil { + return "" + } + buffer[r] = '@' + old := r + 1 + if old >= size { + return syscall.UTF16ToString(buffer[:r]) + } + r, err = syscall.GetEnvironmentVariable(domain, &buffer[old], size-old) + return syscall.UTF16ToString(buffer[:old+r]) +} + +// 系统版本 +func GetSystemVersion() string { + version, err := syscall.GetVersion() + if err != nil { + return "" + } + return fmt.Sprintf("%d.%d (%d)", byte(version), uint8(version>>8), version>>16) +} + +type diskusage struct { + Path string `json:"path"` + Total uint64 `json:"total"` + Free uint64 `json:"free"` +} + +func usage(getDiskFreeSpaceExW *syscall.LazyProc, path string) (diskusage, error) { + lpFreeBytesAvailable := int64(0) + var info = diskusage{Path: path} + diskret, _, err := getDiskFreeSpaceExW.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(info.Path))), + uintptr(unsafe.Pointer(&lpFreeBytesAvailable)), + uintptr(unsafe.Pointer(&(info.Total))), + uintptr(unsafe.Pointer(&(info.Free)))) + if diskret != 0 { + err = nil + } + return info, err +} + +// 硬盘信息 +func GetDiskInfo() (infos []diskusage) { + GetLogicalDriveStringsW := kernel.NewProc("GetLogicalDriveStringsW") + GetDiskFreeSpaceExW := kernel.NewProc("GetDiskFreeSpaceExW") + lpBuffer := make([]byte, 254) + diskret, _, _ := GetLogicalDriveStringsW.Call( + uintptr(len(lpBuffer)), + uintptr(unsafe.Pointer(&lpBuffer[0]))) + if diskret == 0 { + return + } + for _, v := range lpBuffer { + if v >= 65 && v <= 90 { + path := string(v) + ":" + if path == "A:" || path == "B:" { + continue + } + info, err := usage(GetDiskFreeSpaceExW, string(v)+":") + if err != nil { + continue + } + infos = append(infos, info) + } + } + return infos +} + +// CPU信息 +// 简单的获取方法fmt.Sprintf("Num:%d Arch:%s\n", runtime.NumCPU(), runtime.GOARCH) +func GetCpuInfo() string { + var size uint32 = 128 + var buffer = make([]uint16, size) + var index = uint32(copy(buffer, syscall.StringToUTF16("Num:")) - 1) + nums := syscall.StringToUTF16Ptr("NUMBER_OF_PROCESSORS") + arch := syscall.StringToUTF16Ptr("PROCESSOR_ARCHITECTURE") + r, err := 
syscall.GetEnvironmentVariable(nums, &buffer[index], size-index) + if err != nil { + return "" + } + index += r + index += uint32(copy(buffer[index:], syscall.StringToUTF16(" Arch:")) - 1) + r, err = syscall.GetEnvironmentVariable(arch, &buffer[index], size-index) + if err != nil { + return syscall.UTF16ToString(buffer[:index]) + } + index += r + return syscall.UTF16ToString(buffer[:index+r]) +} + +type memoryStatusEx struct { + cbSize uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 // in bytes + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// 内存信息 +func GetMemory() string { + GlobalMemoryStatusEx := kernel.NewProc("GlobalMemoryStatusEx") + var memInfo memoryStatusEx + memInfo.cbSize = uint32(unsafe.Sizeof(memInfo)) + mem, _, _ := GlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))) + if mem == 0 { + return "" + } + return fmt.Sprint(memInfo.ullTotalPhys/(1024*1024)) + "MB" +} + +type intfInfo struct { + Name string + Ipv4 []string + Ipv6 []string +} + +// 网卡信息 +func GetIntfs() []intfInfo { + intf, err := net.Interfaces() + if err != nil { + return []intfInfo{} + } + + //fmt.Println(intf) + var is = make([]intfInfo, len(intf)) + for i, v := range intf { + + if (v.Flags & net.FlagBroadcast) > 0 { + + ips, err := v.Addrs() + if err != nil { + continue + } + is[i].Name = v.Name + for _, ip := range ips { + if strings.Contains(ip.String(), ":") { + is[i].Ipv6 = append(is[i].Ipv6, ip.String()) + } else { + is[i].Ipv4 = append(is[i].Ipv4, ip.String()) + } + } + } + + } + return is +} + +// 主板信息 +func GetMoreCpuInfo() string { + var s = []struct { + Name string + }{} + err := wmi.Query("SELECT * FROM Win32_Processor", &s) + if err != nil { + return "" + } + return s[0].Name +} + +// 主板信息 +func GetMotherboardInfo() string { + var s = []struct { + Product string + }{} + err := wmi.Query("SELECT Product FROM Win32_BaseBoard WHERE (Product IS NOT NULL)", &s) + if err != nil { + return "" + } + return s[0].Product +} + +// SerialNumber信息 +func GetSerialNumber() string { + var s = []struct { + SerialNumber string + }{} + err := wmi.Query("SELECT SerialNumber FROM Win32_DiskDrive WHERE (SerialNumber IS NOT NULL) ", &s) //AND (MediaType LIKE 'Fixed hard disk%') + if err != nil { + return "" + } + fmt.Println(s) + fmt.Println("====SerialNumber===") + return s[0].SerialNumber + +} + +// BIOS信息 +func GetBiosInfo() string { + var s = []struct { + Name string + }{} + err := wmi.Query("SELECT Name FROM Win32_BIOS WHERE (Name IS NOT NULL)", &s) // WHERE (BIOSVersion IS NOT NULL) + if err != nil { + return "" + } + return s[0].Name +} diff --git a/agent/internal/nats_service/cfn_to_schdule/asset_info/asset_windows_test.go b/agent/internal/nats_service/cfn_to_schdule/asset_info/asset_windows_test.go new file mode 100644 index 0000000..e3f91aa --- /dev/null +++ b/agent/internal/nats_service/cfn_to_schdule/asset_info/asset_windows_test.go @@ -0,0 +1,32 @@ +package asset_info + +import ( + "fmt" + "runtime" + "testing" +) + +func TestGetBiosInfo(t *testing.T) { + fmt.Printf("开机时长:%s\n", GetStartTime()) + fmt.Printf("当前用户:%s\n", GetUserName()) + fmt.Printf("当前系统:%s\n", runtime.GOOS) + fmt.Printf("系统版本:%s\n", GetSystemVersion()) + fmt.Printf("Bios:%s\n", GetBiosInfo()) + fmt.Printf("Motherboard:\t%s\n", GetMotherboardInfo()) + + fmt.Printf("CPU:\t%s\n", GetCpuInfo()) + fmt.Printf("CPU:\t%s\n", GetMoreCpuInfo()) + + fmt.Printf("Memory:\t%s\n", GetMemory()) + fmt.Printf("Disk:\t%v\n", 
GetDiskInfo()) + + intfs := GetIntfs() + for _, i := range intfs { + fmt.Printf("Interfaces:\t%v\n", i) + } + + number := GetSerialNumber() + + fmt.Printf("number:\t%v\n", number) + +} diff --git a/agent/internal/nats_service/cfn_to_schdule/heartbeat/heartbeat.go b/agent/internal/nats_service/cfn_to_schdule/heartbeat/heartbeat.go new file mode 100644 index 0000000..8134117 --- /dev/null +++ b/agent/internal/nats_service/cfn_to_schdule/heartbeat/heartbeat.go @@ -0,0 +1,58 @@ +package heartbeat + +import ( + "encoding/json" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/agent" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/model/nats_msg_model" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/definition" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/service/heartbeat_service" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + natsClient "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/nats-client" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils/randomutils" + "time" +) + +var ( + commonConfig = config.Config +) + +func Strat() { + InitHeartBeatScheduler() +} + +// InitHeartBeatScheduler +// 初始化 heartbeat 定时器 +func InitHeartBeatScheduler() { + log.Infof("启动心跳服务") + // 每 30 秒钟时执行一次 + randomNumber := randomutils.GetRandomNumber(-10, 10) + ticker := time.NewTicker(time.Duration(commonConfig.Schedule.HeartBeat+int64(randomNumber)) * time.Second) // 创建一个定时器 + go func() { + for { + select { + case <-ticker.C: + doHeartBeatMQ() + } + } + }() +} + +func doHeartBeatMQ() { + bytes, err := json.Marshal(heartbeat_service.AgentState) + if err != nil { + log.Infof("上报心跳信息序列化失败:%s", err) + return + } + + msg := nats_msg_model.MsgModel{ + Func: definition.HEALTHCHECK, + Body: bytes, + Version: "v1", + Rid: agent.Agent.RID, + } + + marshal, _ := msg.Marshal() + log.Infof("上报心跳") + natsClient.Publish(definition.ToScheduleSubject, marshal) +} diff --git a/agent/internal/nats_service/definition/const.go b/agent/internal/nats_service/definition/const.go new file mode 100644 index 0000000..20ef61e --- /dev/null +++ b/agent/internal/nats_service/definition/const.go @@ -0,0 +1,11 @@ +package definition + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils" + "path/filepath" +) + +var ( + RunDirectory, _ = utils.GetCurrentPath() + CompressedAgentPath = filepath.Join(RunDirectory, "../upgrade") +) diff --git a/agent/internal/nats_service/definition/func_definition.go b/agent/internal/nats_service/definition/func_definition.go new file mode 100644 index 0000000..704aa38 --- /dev/null +++ b/agent/internal/nats_service/definition/func_definition.go @@ -0,0 +1,18 @@ +package definition + +const ( + HEALTHCHECK = "cfn.schedule.health.probe" + ASSETINFO = "cfn.schedule.asset.base_info" + GETCOMPANY = "cfn.schedule.asset.company" + // AGENTUPGRADERESULT = "cfn.schedule.deamon.agent_upgrade_result" + AGENTLOG = "cfn.agent.log.agent" + TELEGRAFLOG = "cfn.agent.log.telegraf" + COMPONENTINFO = "cfn.agent.component.detail_info" + TELEGRAFCONF = "cfn.agent.component.telegraf_conf" // 更新telegraf配置文件 + + AGENTDOWNLOAD = "cfn.agent.agent.download" + UPGRADEPLAN = "cfn.agent.agent.upgrade_plan" + UPGRADEIMMEDIATELY = "cfn.agent.agent.upgrade_immediately" + + AGENTDOWNLOADLOCAL = "cfn.agent.agent.download_local" +) diff --git a/agent/internal/nats_service/definition/response.go b/agent/internal/nats_service/definition/response.go new file mode 100644 index 0000000..e570029 --- 
/dev/null +++ b/agent/internal/nats_service/definition/response.go @@ -0,0 +1,62 @@ +package definition + +import ( + "encoding/json" + "net/http" +) + +func UnmarshalMsgModel(data []byte) (Response, error) { + var r Response + err := json.Unmarshal(data, &r) + return r, err +} + +func (r *Response) Marshal() ([]byte, error) { + return json.Marshal(r) +} + +type Response struct { + Code int `json:"code"` + Msg string `json:"msg"` + Data []byte `json:"data"` +} + +func Resp() *Response { + // 初始化response + return &Response{ + Code: 0, + Msg: "", + Data: nil, + } +} + +// FailCode 自定义错误码返回 +func (r *Response) Fail(msg ...string) *Response { + r.Code = http.StatusInternalServerError + if msg != nil { + r.Msg = msg[0] + } + return r +} + +// FailCode 自定义错误码返回 +func (r *Response) FailWithData(data []byte, msg ...string) *Response { + r.Code = http.StatusInternalServerError + if msg != nil { + r.Msg = msg[0] + } + r.Data = data + + return r +} + +// Success 正确返回 +func (r *Response) Success(data []byte, msg ...string) *Response { + r.Code = http.StatusOK + if msg != nil { + r.Msg = msg[0] + } + r.Data = data + + return r +} diff --git a/agent/internal/nats_service/definition/subject.go b/agent/internal/nats_service/definition/subject.go new file mode 100644 index 0000000..1dc8e93 --- /dev/null +++ b/agent/internal/nats_service/definition/subject.go @@ -0,0 +1,16 @@ +package definition + +import ( + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/agent" +) + +var ( + ToAgentUnicastSubject = fmt.Sprintf("cfn_to_agent_unicast_%s", agent.Agent.RID) + ToAgentBroadcastSubject = "cfn_to_agent_broadcast" + ToAgentBroadcastPublicEnterpriseSubject = fmt.Sprintf("cfn_to_agent_broadcast_public_%s", agent.Agent.Enterprise) + + ToScheduleSubject = "cfn_to_schedule" + ToAgentBroadcastPrivateEnterpriseSubject = fmt.Sprintf("cfn_to_agent_broadcast_private_%s", agent.Agent.Enterprise) + ToScheduleQueueSubject = "cfn_to_schedule_queue" +) diff --git a/agent/internal/nats_service/handle/handle_msg.go b/agent/internal/nats_service/handle/handle_msg.go new file mode 100644 index 0000000..9f39945 --- /dev/null +++ b/agent/internal/nats_service/handle/handle_msg.go @@ -0,0 +1,482 @@ +package handle + +import ( + "encoding/json" + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/agent" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/bin_path" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/model/nats_msg_model" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/definition" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/service/component" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/service/heartbeat_service" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/service/log_service" + minioClient "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/file-download-client" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/providers/shared" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + natsClient "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/nats-client" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils/compression" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils/randomutils" + "github.com/nats-io/nats.go" + "gopkg.in/yaml.v3" + "io/fs" + "os" + "path/filepath" + "strconv" + "strings" + "time" +) + +var ( + UpgradePlan = &nats_msg_model.UpgradePlan{} +) + +func HandleMsg(msg *nats.Msg, model 
nats_msg_model.MsgModel) { + var err error + + switch model.Func { + case definition.AGENTLOG: + lineNum := string(model.Body) + lineUnit, err2 := strconv.ParseUint(lineNum, 10, 64) + if err2 != nil { + lineUnit = 100 + } + + info, err := log_service.GetAgentLog(lineUnit) + if err != nil { + log.Errorf("获取Agent日志报错: %s", err) + ret, _ := definition.Resp().Fail(err.Error()).Marshal() + msg.Respond(ret) + } else { + marshal, _ := json.Marshal(info) + ret, _ := definition.Resp().Success(marshal, "OK!").Marshal() + msg.Respond(ret) + } + break + + case definition.TELEGRAFLOG: + lineNum := string(model.Body) + lineUnit, err2 := strconv.ParseUint(lineNum, 10, 64) + if err2 != nil { + lineUnit = 100 + } + + info, err := log_service.GetAgentUpgradeLog(lineUnit) + if err != nil { + log.Errorf("获取Telegraf日志报错: %s", err) + ret, _ := definition.Resp().Fail(err.Error()).Marshal() + msg.Respond(ret) + } else { + marshal, _ := json.Marshal(info) + ret, _ := definition.Resp().Success(marshal, "OK!").Marshal() + msg.Respond(ret) + } + break + + case definition.COMPONENTINFO: + info, err := component.GetComInfo() + if err != nil { + log.Errorf("获取组件详情报错: %s", err) + ret, _ := definition.Resp().Fail(err.Error()).Marshal() + msg.Respond(ret) + } else { + marshal, _ := json.Marshal(info) + ret, _ := definition.Resp().Success(marshal, "OK!").Marshal() + msg.Respond(ret) + } + + break + + case definition.TELEGRAFCONF: // 更新telegraf配置文件 + component.UpdateTelegrafConfig(model.Body) + if err != nil { + log.Errorf("更新Telegraf配置报错: %s", err) + ret, _ := definition.Resp().Fail(err.Error()).Marshal() + msg.Respond(ret) + } else { + ret, _ := definition.Resp().Success([]byte("完成配置文件更新!"), "OK!").Marshal() + msg.Respond(ret) + } + break + + case definition.UPGRADEIMMEDIATELY: // 单个升级(包含升级包地址、升级配置),并解压至指定目录,对于失败的情况给与结果反馈(仅报告失败结果,成功与否由升级程序判断) + upgradeCMD, err := nats_msg_model.UnmarshalUpgradeCMD(model.Body) // 解析命令 + if err != nil { + log.Errorf("消息body格式有误: %s", err) + result := nats_msg_model.NewUpgradeResult(upgradeCMD, "消息body格式有误", "0") + marshal, err := json.Marshal(result) + ret, _ := definition.Resp().FailWithData(marshal, err.Error()).Marshal() + msg.Respond(ret) + return + } + + if heartbeat_service.AgentState.AgentVersion == upgradeCMD.Version { + log.Info(definition.UPGRADEIMMEDIATELY + "当前运行版本无需升级!") + result := nats_msg_model.NewUpgradeResult(upgradeCMD, "当前运行版本无需升级!", "0") + marshal, _ := json.Marshal(result) + ret, _ := definition.Resp().FailWithData(marshal, "当前运行版本无需升级!").Marshal() + msg.Respond(ret) + return + } + + if !(heartbeat_service.AgentState.OSType == upgradeCMD.OsType && heartbeat_service.AgentState.Architecture == upgradeCMD.ArchType) { + log.Info(definition.UPGRADEIMMEDIATELY + "操作系统或服务器架构与本机不符!") + result := nats_msg_model.NewUpgradeResult(upgradeCMD, "操作系统或服务器架构与本机不符!", "0") + marshal, _ := json.Marshal(result) + ret, _ := definition.Resp().FailWithData(marshal, definition.UPGRADEIMMEDIATELY+"操作系统或服务器架构与本机不符!").Marshal() + msg.Respond(ret) + return + } + + // 下载升级包至本地 upgrade 目录 + localUpgradeCompressedFile := filepath.Join(definition.CompressedAgentPath, upgradeCMD.Filename) + err = minioClient.DownloadToLocal(upgradeCMD.DownUrl, upgradeCMD.Filename, localUpgradeCompressedFile, 3) + if err != nil { + log.Errorf("下载安装包至本地失败: %s", err) + result := nats_msg_model.NewUpgradeResult(upgradeCMD, "下载安装包至本地失败!", "0") + marshal, _ := json.Marshal(result) + ret, _ := definition.Resp().FailWithData(marshal, fmt.Sprintf(definition.UPGRADEIMMEDIATELY+"下载安装包至本地失败: %s", err)).Marshal() + msg.Respond(ret) 
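// The AGENTLOG, TELEGRAFLOG and COMPONENTINFO branches above all repeat the
// same reply pattern: wrap the error with Resp().Fail on failure, otherwise
// marshal the payload and wrap it with Resp().Success, then msg.Respond.
// A small helper like this sketch (not part of the original code) would
// remove that duplication:
func respond(msg *nats.Msg, payload any, err error) {
	if err != nil {
		ret, _ := definition.Resp().Fail(err.Error()).Marshal()
		msg.Respond(ret)
		return
	}
	body, _ := json.Marshal(payload)
	ret, _ := definition.Resp().Success(body, "OK!").Marshal()
	msg.Respond(ret)
}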
+ return + } + + // response请求端 + log.Infof(definition.UPGRADEIMMEDIATELY+"下载成功,%s,%s", upgradeCMD.Filename, upgradeCMD.Version) + ret, _ := definition.Resp().Success([]byte(definition.UPGRADEIMMEDIATELY + "下载成功!")).Marshal() + msg.Respond(ret) + + // 将安装包文件解压到以版本命名的目录 + unCompression(upgradeCMD.Filename, upgradeCMD.CompressionType, upgradeCMD.Version) + + // 更新升级计划信息 + updateUpgradeVersionInfo(upgradeCMD, true) + break + + case definition.AGENTDOWNLOAD: // 小助手升级包下载命令接收接口,并解压至指定目录 【下载所有版本、所有架构的升级包,仅解压符合自己版本、架构的升级包,已供后续升级】。对于失败的情况给与结果反馈 + if agent.Agent.Role == "Cache" { + // 解析命令 + upgradeCMD, err := nats_msg_model.UnmarshalUpgradeCMD(model.Body) + if err != nil { + log.Errorf(definition.AGENTDOWNLOAD+"消息body格式有误: %s", err) + reportUpgradeRrrors(upgradeCMD, "消息body格式有误!") + return + } + + // 下载升级包至本地 upgrade 目录 + localUpgradeCompressedFile := filepath.Join(definition.CompressedAgentPath, upgradeCMD.Filename) + err = minioClient.DownloadToLocal(upgradeCMD.DownUrl, upgradeCMD.Filename, localUpgradeCompressedFile, 3) + if err != nil { + log.Errorf(definition.AGENTDOWNLOAD+"下载安装包至本地失败: %s", err) + reportUpgradeRrrors(upgradeCMD, + fmt.Sprintf("下载安装包至本地失败!升级包:%s, 下载缓存机:%s, 企业:%s", upgradeCMD.Filename, agent.Agent.RID, agent.Agent.Enterprise), + ) + return + } + + log.Infof(definition.AGENTDOWNLOAD+"下载成功,%s,%s", upgradeCMD.Filename, upgradeCMD.Version) + + // 通知企业内其他一体机来下载 + // todo 内网IP + ip, _, _ := shared.NamedNetwork(agent.Agent.NetInterface) + LocalUpgradeCMD := nats_msg_model.LocalUpgradeCMD{ + AgentId: upgradeCMD.AgentId, + + CompressionType: upgradeCMD.CompressionType, + Filename: upgradeCMD.Filename, + Version: upgradeCMD.Version, + IP: ip, + Port: config.Config.Server.Port, + OsType: upgradeCMD.OsType, + ArchType: upgradeCMD.ArchType, + + // 备用 + DownUrl: upgradeCMD.DownUrl, + } + + bytes, _ := LocalUpgradeCMD.Marshal() + + msg := nats_msg_model.MsgModel{ + Func: definition.AGENTDOWNLOADLOCAL, + Body: bytes, + Version: "v1", + Rid: agent.Agent.RID, + } + + marshal, _ := msg.Marshal() + err = natsClient.Publish(definition.ToAgentBroadcastPrivateEnterpriseSubject, marshal) + if err != nil { + log.Errorf(definition.AGENTDOWNLOAD+"安装包本地广播失败: %s", err) + reportUpgradeRrrors(upgradeCMD, + fmt.Sprintf("安装包本地广播失败:%s,下载缓存机:%s,频道:%s", upgradeCMD.Filename, agent.Agent.RID, agent.Agent.Enterprise), + ) + return + } + + // 将安装包文件解压到以版本命名的目录,仅解压符合自己版本、架构的升级包,已供后续升级 + // 如果缓存机下载升级包失败,则通过手工升级 + if heartbeat_service.AgentState.AgentVersion == upgradeCMD.Version { + log.Infof(definition.AGENTDOWNLOAD+"版本与本机运行版本一至: %s", upgradeCMD.Version) + return + } + + if !checkOsTypeAndArchSame(upgradeCMD) { + return + } + + unCompression(upgradeCMD.Filename, upgradeCMD.CompressionType, upgradeCMD.Version) + // 更新升级计划信息 + updateUpgradeVersionInfo(upgradeCMD, false) + } + break + + case definition.UPGRADEPLAN: // 升级计划下发,更新upgradelock文件 + plan, err := nats_msg_model.UnmarshalUpgradePlan(model.Body) + if err != nil { + log.Errorf(definition.UPGRADEPLAN+"消息body格式有误: %s", err) + reportUpgradePlanRrrors(plan, "消息body格式有误!") + return + } + + if plan.DeviceType != "0" && agent.Agent.Type != plan.DeviceType { + log.Info(definition.UPGRADEPLAN + "设备不匹配!") + //reportUpgradePlanRrrors(plan, fmt.Sprintf("设备不匹配:%s", agent.Agent.RID), "0") + return + } + + updateUpgradeTimePlan(plan) + break + + case definition.AGENTDOWNLOADLOCAL: // 从企业内缓存机下载,并解压至指定目录,对于失败的情况给与结果反馈 + if agent.Agent.Role != "Cache" { + // 先收集缓存机IP列表, + localUpgradeCMD, err := nats_msg_model.UnmarshalLocalUpgradeCMD(model.Body) + if err != nil { + 
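// DownloadToLocal above is called with a retry count of 3; its actual
// implementation lives in pkg/file-download-client and is not shown here.
// A stdlib-only sketch of the download-with-retries idea, for illustration
// only (names and behaviour are assumptions, not the project's API):
func downloadWithRetry(url, dest string, attempts int) error {
	var lastErr error
	for i := 0; i < attempts; i++ {
		lastErr = func() error {
			resp, err := http.Get(url)
			if err != nil {
				return err
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				return fmt.Errorf("unexpected status %s", resp.Status)
			}
			f, err := os.Create(dest)
			if err != nil {
				return err
			}
			defer f.Close()
			_, err = io.Copy(f, resp.Body)
			return err
		}()
		if lastErr == nil {
			return nil
		}
	}
	return lastErr
}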
log.Errorf(definition.AGENTDOWNLOADLOCAL+"消息body格式有误: %s", err) + return + } + + if agent.Agent.Type != UpgradePlan.DeviceType { + log.Info(definition.AGENTDOWNLOADLOCAL + "设备不匹配!") + return + } + + if heartbeat_service.AgentState.AgentVersion == localUpgradeCMD.Version { + log.Info(definition.AGENTDOWNLOADLOCAL + "当前运行版本为最新版本!") + return + } + + if !checkOsTypeAndArchSameLocal(localUpgradeCMD) { + return + } + + // 新的版本到来了,清空CacheIPs列表,开启一个新的doLocalDownload + // todo: 支持处理连续两次紧急升级情况处理, 需要支持取消上一次未执行完的doLocalDownload方法 + if len(CacheIPs) > 0 { + if CacheIPs[0].Version != localUpgradeCMD.Version { + CacheIPs = []nats_msg_model.LocalUpgradeCMD{} + } + } + + if len(CacheIPs) == 0 { + go doLocalDownloadAndUnComporess(localUpgradeCMD) + } + + CacheIPs = append(CacheIPs, localUpgradeCMD) + } + break + } +} + +func checkOsTypeAndArchSameLocal(upgradeCMD nats_msg_model.LocalUpgradeCMD) bool { + localArch := strings.ToLower(heartbeat_service.AgentState.Architecture) + cmdArch := strings.ToLower(upgradeCMD.ArchType) + if strings.Index(localArch, cmdArch) >= 0 || strings.Index(cmdArch, localArch) >= 0 { + + } else { + log.Infof(definition.AGENTDOWNLOAD+"Architecture(%s)与本机(%s)不一致", upgradeCMD.ArchType, heartbeat_service.AgentState.Architecture) + return false + } + + localOS := strings.ToLower(heartbeat_service.AgentState.OSType) + cmdOS := strings.ToLower(upgradeCMD.OsType) + if strings.Index(localOS, cmdOS) >= 0 || strings.Index(cmdOS, localOS) >= 0 { + + } else { + log.Infof(definition.AGENTDOWNLOAD+"OSType(%s)与本机(%s)不一致", upgradeCMD.OsType, heartbeat_service.AgentState.OSType) + return false + } + + return true +} + +func checkOsTypeAndArchSame(upgradeCMD nats_msg_model.UpgradeCMD) bool { + localArch := strings.ToLower(heartbeat_service.AgentState.Architecture) + cmdArch := strings.ToLower(upgradeCMD.ArchType) + if strings.Index(localArch, cmdArch) >= 0 || strings.Index(cmdArch, localArch) >= 0 { + + } else { + log.Infof(definition.AGENTDOWNLOAD+"Architecture(%s)与本机(%s)不一致", upgradeCMD.ArchType, heartbeat_service.AgentState.Architecture) + return false + } + + localOS := strings.ToLower(heartbeat_service.AgentState.OSType) + cmdOS := strings.ToLower(upgradeCMD.OsType) + if strings.Index(localOS, cmdOS) >= 0 || strings.Index(cmdOS, localOS) >= 0 { + + } else { + log.Infof(definition.AGENTDOWNLOAD+"OSType(%s)与本机(%s)不一致", upgradeCMD.OsType, heartbeat_service.AgentState.OSType) + return false + } + + return true +} + +func reportUpgradeRrrors(upgradeCMD nats_msg_model.UpgradeCMD, upgradeResult string) { + result := nats_msg_model.NewUpgradeResult(upgradeCMD, upgradeResult, "0") + marshal, _ := json.Marshal(result) + natsClient.Publish(definition.ToScheduleSubject, marshal) +} + +func reportLocalUpgradeRrrors(upgradeCMD nats_msg_model.LocalUpgradeCMD, upgradeResult string) { + result := nats_msg_model.NewLocalUpgradeResult(upgradeCMD, upgradeResult, "0") + marshal, _ := json.Marshal(result) + natsClient.Publish(definition.ToScheduleSubject, marshal) +} + +func reportUpgradePlanRrrors(upgradePlan nats_msg_model.UpgradePlan, upgradeResult string) { + result := nats_msg_model.NewUpgradePlanResult(upgradePlan, upgradeResult, "0") + marshal, _ := json.Marshal(result) + natsClient.Publish(definition.ToScheduleSubject, marshal) +} + +func readUpgradePlan() error { + if UpgradePlan.UpgradeRule == "" || UpgradePlan.Version == "" { + return nil + } + + upgradeByte, err := os.ReadFile(bin_path.UPGRADEPLAN) + if err != nil { + log.Error(definition.UPGRADEIMMEDIATELY + "读取配置文件失败:" + err.Error()) + return nil + } + 
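// checkOsTypeAndArchSame and checkOsTypeAndArchSameLocal above test two-way
// substring containment with strings.Index and an empty "if" branch. The
// same predicate reads more directly with strings.Contains; a sketch of a
// shared helper (not part of the original code):
func looselyMatches(local, remote string) bool {
	l, r := strings.ToLower(local), strings.ToLower(remote)
	return strings.Contains(l, r) || strings.Contains(r, l)
}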
+ yaml.Unmarshal(upgradeByte, UpgradePlan) + return err +} + +func updateUpgradeTimePlan(plan nats_msg_model.UpgradePlan) { + log.Info(definition.UPGRADEPLAN + "开始升级计划文本") + + err := readUpgradePlan() + if err != nil { + reportUpgradePlanRrrors(plan, fmt.Sprintf("读取配置文件失败:%s", agent.Agent.RID)) + return + } + + UpgradePlan.UpgradeRule = plan.UpgradeRule + UpgradePlan.UpgradeTime = plan.UpgradeTime + UpgradePlan.DeviceType = plan.DeviceType + UpgradePlan.Status = plan.Status + UpgradePlan.RID = agent.Agent.RID + + out, _ := yaml.Marshal(UpgradePlan) + err = os.WriteFile(bin_path.UPGRADEPLAN, out, fs.ModePerm) + if err != nil { + log.Error(definition.UPGRADEPLAN + "写升级计划文本失败:" + err.Error()) + reportUpgradePlanRrrors(plan, fmt.Sprintf("写升级计划文本失败:%s", agent.Agent.RID)) + } + log.Info(definition.UPGRADEPLAN + "完成升级计划文本") +} + +// updateUpgradeInfo 更新版本 +func updateUpgradeVersionInfo(upgradeCMD nats_msg_model.UpgradeCMD, immediately bool) { + log.Info(definition.UPGRADEPLAN + "开始升级计划文本") + + err := readUpgradePlan() + if err != nil { + reportUpgradeRrrors(upgradeCMD, fmt.Sprintf("读取配置文件失败:%s", agent.Agent.RID)) + return + } + + UpgradePlan.CurrentVerson = heartbeat_service.AgentState.AgentVersion + UpgradePlan.Version = upgradeCMD.Version + UpgradePlan.AgentId = upgradeCMD.AgentId + UpgradePlan.Option = "upgrade" + if immediately { + UpgradePlan.UpgradeRule = "0" // 0:立即升级 1:按计划升级 + } + UpgradePlan.RID = agent.Agent.RID + + out, _ := yaml.Marshal(UpgradePlan) + err = os.WriteFile(bin_path.UPGRADEPLAN, out, fs.ModePerm) + if err != nil { + log.Info(definition.UPGRADEPLAN + "写升级计划文本失败:" + err.Error()) + reportUpgradeRrrors(upgradeCMD, fmt.Sprintf("写升级计划文本失败:%s", agent.Agent.RID)) + } + + log.Info(definition.UPGRADEPLAN + "完成升级计划文本") +} + +func doLocalDownloadAndUnComporess(localUpgradeCMD nats_msg_model.LocalUpgradeCMD) { + // 5分钟后开始下载 + randomNumber := 60*5 + randomutils.GetRandomNumber(-100, 200) + ticker := time.NewTicker(time.Duration(randomNumber) * time.Second) + + log.Infof(definition.UPGRADEPLAN+"%d秒后开始下载", randomNumber) + + select { + case <-ticker.C: + log.Infof(definition.UPGRADEPLAN + "开始下载......") + + // 从缓存机中随机选一个,随机等1~10分钟下载 + // 下载安装包至 localUpgradeCompressedFile + localUpgradeCompressedFile := filepath.Join(definition.CompressedAgentPath, localUpgradeCMD.Filename) + url := fmt.Sprintf("http://%s:%d?filename=%s", localUpgradeCMD.IP, localUpgradeCMD.Port, localUpgradeCMD.Filename) + + err := minioClient.DownloadToLocal(url, localUpgradeCMD.Filename, localUpgradeCompressedFile, 3) + if err != nil { + log.Errorf(definition.AGENTDOWNLOADLOCAL+"下载安装包至本地失败: %s", err) + + log.Errorf("使用备用下载地址下载安装包至本地......") + + // 直接去minio下载 + err = minioClient.DownloadToLocal(localUpgradeCMD.DownUrl, localUpgradeCMD.Filename, localUpgradeCompressedFile, 1) + if err != nil { + log.Errorf("使用备用下载地址下载安装包至本地失败: %s", err) + reportLocalUpgradeRrrors(localUpgradeCMD, + fmt.Sprintf("下载安装包至本地失败:%s,企业频道:%s", localUpgradeCMD.Filename, definition.AGENTDOWNLOADLOCAL)) + return + } + } + + log.Infof(definition.UPGRADEPLAN + "下载完成") + + // 将安装包文件解压到以版本命名的目录 + unCompression(localUpgradeCMD.Filename, localUpgradeCMD.CompressionType, localUpgradeCMD.Version) + + heartbeat_service.AgentState.AgentVersion = localUpgradeCMD.Version + // 清空 + CacheIPs = make([]nats_msg_model.LocalUpgradeCMD, 5) + } +} + +// unCompression 将安装包文件解压到以版本命名的目录 +func unCompression(filename, compressionType, version string) { + log.Infof("开始解压%s版本小助手至本地......", version) + + versionedAgentPath := filepath.Join(definition.RunDirectory, "../upgrade", 
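// updateUpgradeTimePlan and updateUpgradeVersionInfo above rewrite the plan
// file in place with os.WriteFile; if the agent is stopped mid-write the
// file can be left truncated. A common mitigation is to write a temporary
// file and rename it over the target. A sketch under that assumption (this
// is not what the original code does):
func writeYAMLAtomic(path string, v any) error {
	out, err := yaml.Marshal(v)
	if err != nil {
		return err
	}
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, out, 0o644); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}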
version) + + // 如果 path 路径不存在,会有 err,然后通过 IsNotExist 判定文件路径是否存在,如果 true 则不存在,注意用 os.ModePerm 这样文件是可以写入的 + if _, err := os.Stat(versionedAgentPath); os.IsNotExist(err) { + // mkdir 创建目录,mkdirAll 可创建多层级目录 + os.MkdirAll(versionedAgentPath, os.ModePerm) + } + + if compressionType == compression.ZIP { + compression.NewZipHandler().UnZip(filepath.Join(definition.CompressedAgentPath, filename), versionedAgentPath) + } else { + compression.NewTGZHandler().UNTarGZ(filepath.Join(definition.CompressedAgentPath, filename), versionedAgentPath) + } + + log.Infof("已完成解压小助手%s至%s", filepath.Join(definition.CompressedAgentPath, filename), versionedAgentPath) +} diff --git a/agent/internal/nats_service/handle/variable.go b/agent/internal/nats_service/handle/variable.go new file mode 100644 index 0000000..84f280b --- /dev/null +++ b/agent/internal/nats_service/handle/variable.go @@ -0,0 +1,5 @@ +package handle + +import "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/model/nats_msg_model" + +var CacheIPs = make([]nats_msg_model.LocalUpgradeCMD, 50) diff --git a/agent/internal/nats_service/start.go b/agent/internal/nats_service/start.go new file mode 100644 index 0000000..1857d9b --- /dev/null +++ b/agent/internal/nats_service/start.go @@ -0,0 +1,23 @@ +package nats_service + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/cfn_to_agent_broadcast" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/cfn_to_agent_broadcast_private_enterprise" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/cfn_to_agent_broadcast_public_enterprice" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/cfn_to_agent_unicast" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/cfn_to_schdule/asset_info" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/cfn_to_schdule/heartbeat" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" +) + +func init() { + log.Infof("启动nats订阅服务") + + go cfn_to_agent_unicast.Start() + go cfn_to_agent_broadcast.Start() + go cfn_to_agent_broadcast_private_enterprise.Start() + go cfn_to_agent_broadcast_public_enterprice.Start() + + asset_info.Strat() + heartbeat.Strat() +} diff --git a/agent/internal/nats_service/upgrade/agentv1.1.0.tar.gz b/agent/internal/nats_service/upgrade/agentv1.1.0.tar.gz new file mode 100644 index 0000000..6ae7c94 Binary files /dev/null and b/agent/internal/nats_service/upgrade/agentv1.1.0.tar.gz differ diff --git a/agent/internal/pkg/authen/contract/configuration.go b/agent/internal/pkg/authen/contract/configuration.go new file mode 100644 index 0000000..0ccd453 --- /dev/null +++ b/agent/internal/pkg/authen/contract/configuration.go @@ -0,0 +1,8 @@ +package contract + +type Configuration interface { + Secret() []byte + JWTCert() []byte + JWTTokenLife() int + JWTIssuer() string +} diff --git a/agent/internal/pkg/authen/contract/jwt.go b/agent/internal/pkg/authen/contract/jwt.go new file mode 100644 index 0000000..2ba602a --- /dev/null +++ b/agent/internal/pkg/authen/contract/jwt.go @@ -0,0 +1,6 @@ +package contract + +type LoginInfo interface { + GetUserID() int + GetUserName() string +} diff --git a/agent/internal/pkg/authen/contract/user.go b/agent/internal/pkg/authen/contract/user.go new file mode 100644 index 0000000..9e6705f --- /dev/null +++ b/agent/internal/pkg/authen/contract/user.go @@ -0,0 +1,6 @@ +package contract + +type User interface { + GetUserID() int + GetUserName() string +} diff --git 
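// Note on variable.go above: make([]nats_msg_model.LocalUpgradeCMD, 50)
// creates a slice of length 50 filled with zero-value entries, so the
// len(CacheIPs) == 0 and CacheIPs[0].Version checks in handle_msg.go run
// against empty placeholder elements (the reset in
// doLocalDownloadAndUnComporess uses length 5 in the same way). If the
// intent was a pre-sized but empty slice, the usual form is zero length
// with capacity:
var CacheIPs = make([]nats_msg_model.LocalUpgradeCMD, 0, 50)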
a/agent/internal/pkg/authen/root.go b/agent/internal/pkg/authen/root.go new file mode 100644 index 0000000..a54ab10 --- /dev/null +++ b/agent/internal/pkg/authen/root.go @@ -0,0 +1,14 @@ +package authen + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/authen/contract" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/authen/utility" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/authen/utility/crypto" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/authen/utility/jwt" +) + +var ( + Configuration contract.Configuration = utility.NewBaseConfig() + LoginInfoCryptor = crypto.NewAEADCryptor(Configuration) + JWTAuthorizer = jwt.NewJWTAuthorizer(Configuration, LoginInfoCryptor) +) diff --git a/agent/internal/pkg/authen/utility/configuration.go b/agent/internal/pkg/authen/utility/configuration.go new file mode 100644 index 0000000..8856f30 --- /dev/null +++ b/agent/internal/pkg/authen/utility/configuration.go @@ -0,0 +1,189 @@ +package utility + +import ( + "encoding/pem" + "github.com/golang/glog" + "io/ioutil" + "os" + "strconv" + "strings" +) + +func NewBaseConfig() *cfg { + return &cfg{ + secret: secret(), + jwtCert: jwtCert(), + jwtTokenLife: jwtTokenLife(), + jwtIssuer: jwtIssuer(), + } +} + +type cfg struct { + secret Lazy + jwtCert Lazy + jwtTokenLife Lazy + jwtIssuer Lazy +} + +func (s cfg) Secret() (secret []byte) { + Must(s.secret.Value(&secret)) + return +} + +func (s cfg) JWTCert() (jwtCert []byte) { + Must(s.jwtCert.Value(&jwtCert)) + return +} + +func (s cfg) JWTTokenLife() (tokenLife int) { + Must(s.jwtTokenLife.Value(&tokenLife)) + return +} + +func (s cfg) JWTIssuer() (issuer string) { + Must(s.jwtIssuer.Value(&issuer)) + return +} + +func (s cfg) Data() interface{} { + return &struct { + Secret string `json:"secret"` + JWTCert string `json:"jwtCert"` + JWTTokenLife int `json:"jwtTokenLife"` + JWTIssuer string `json:"jwtIssuer"` + }{ + Secret: "********", + JWTCert: "********", + JWTTokenLife: s.JWTTokenLife(), + JWTIssuer: s.JWTIssuer(), + } +} + +func GetEnv(key, defaultValue string) (value string) { + if value = os.Getenv(key); value == "" { + value = defaultValue + } + return +} + +func secret() Lazy { + return NewLazy(func() (value interface{}, err error) { + if s := GetEnv("AEAD_SECRET", "internal"); s == "" || s == "internal" { + value = []byte("dazyunsecretkeysforuserstenx20141019generatedKey") + return + } else { + value = []byte(s) + } + return + }) +} + +func jwtCert() Lazy { + return NewLazy(func() (value interface{}, err error) { + if c := GetEnv("JWT_CERT", "internal"); c == "" || c == "internal" { + value = loadSecret() + return + } else if _, err = os.Stat(c); err == nil { + var content []byte + if content, err = ioutil.ReadFile(c); err != nil { + return + } + block, _ := pem.Decode(content) + value = block.Bytes + return + } + return + }) +} + +func jwtTokenLife() Lazy { + return NewLazy(func() (value interface{}, err error) { + hourStr := GetEnv("BEARER_TOKEN_LIFE", "2") + var hour int + if hour, err = strconv.Atoi(hourStr); err != nil { + glog.Fatalf("parse jwt token life failed, should be specified by number in hour, %s", err) + } + value = hour + return + }) +} + +func jwtIssuer() Lazy { + return NewLazy(func() (value interface{}, err error) { + value = GetEnv("TOKEN_ISSUER", "api-server") + return + }) +} + +func corsAllowHeaders() Lazy { + return NewLazy(func() (value interface{}, err error) { + headers := GetEnv( + "ALLOW_HEADERS", + 
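// In jwtCert above, the first return value of pem.Decode is used without a
// nil check, so a JWT_CERT file that is not valid PEM would panic on
// block.Bytes. A defensive variant, shown as a standalone helper (an
// assumption about the desired behaviour, not the original code):
func decodePEMBytes(content []byte) ([]byte, error) {
	block, _ := pem.Decode(content)
	if block == nil {
		return nil, errors.New("input is not valid PEM data")
	}
	return block.Bytes, nil
}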
"Content-Type,username,authorization,teamspace,project,onbehalfuser") + value = strings.Split(headers, ",") + return + }) +} + +func corsAllowMethods() Lazy { + return NewLazy(func() (value interface{}, err error) { + methods := GetEnv("ALLOW_METHODS", "GET,POST,PUT,DELETE,PATCH") + value = strings.Split(methods, ",") + return + }) +} + +func loadSecret() []byte { + block, _ := pem.Decode([]byte(`-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEA2XUrxPC2ZzfLXiGz3J+BJmfpzNWyRNhUEdljCb9QfMz/54t1 +Eri+R3XEvPE+h7n7izBYohv/qXstwM55SBrLqhHVx8qD9SB5FfvkFjU1Q8K+d/1C +KPQpGZnckCtr9c+LjXVB6h1yXP5lBcmbR2SNBAhN+zRdOAoxyw2CgycCo0kyAotT +a9EiKYVLoP4TyDJJIebRp2rRVENBl647jC2Lp13YEXeZhxe6gHw2yEJcg0e8qn3A +hw/842CNbhCVO5kvEhFnCrfJ7w/K7FpS4sQQzI4BocwrvDJxbwBH7RS9Q4+bax5k +RQqB5C6bGWS59ePmqSJ/HjPBPaWcXT/FaFCqZX6umCiAkVWi2F6PK/Un3mkbCa3u +AnqjPO0urgjel3W7kGbHZc1qnSPypUPhmuoLcLCZvzfpY6sl6rUyn8Pbbx3AaEbn +Taxn2wvWVYAik9eeSO3mXOqdZOyFxL1oQe+dv5BjfC/KaeTA5LDPQZ+GpqUq/1ya +lkeEUU8UsrOuSnmekxYQkHEOJ+LAK/aiCsVkbe05Ues4lAFB8g8FZ9Fo/yoCSAzs +AMC4MPFqtNE+GeZPNAsGaNh3GUDWET7IRmu/scZI5L/eBkAyPwsKohkZOpAzfLEx +5OiewUQ9J5zUBXZuKaZL/5HYyqf6/stItzU7UtP62X8cLi8NnC1Vq+4c9mMCAwEA +AQKCAgEAkgCrrIT50v+RGdiDDKRDcGfggFkSYkrk1z8f0dGT1tdFEk9+AV3s08ns +l/dZxoNGssN5Hw6xbzd3FrcKkzD6gWuMH6KHSrPM2MfQ8mAzLRW6EJIIM7sLRVca +0el1iQsaZZXO9cNjn7BmX6ZnDV4jmAuDYCBeXlvp5q1hbXFpwfxJCZBGKGO4Diyj +BKrS2V154Ls7FK8RcQPfLFodPRbvZyYJBmFIwX1bCR1dIsP7nWEy+T2JYKWJY5jW +HIohyGwnQHhSuM2BVXNDCcHzWLHPnafSzLFqw+cSZjbIFBQSpyPqc9dp9zkA0RXB +qSEKAmBL5E93De4t1pg1Dh7dChbu8A0BjeT9YOX3eLdSnu5A9tJXu8lubyzRBjNE +360tFwC8c4IAs0KuEq3W1tzv4AEPqyx/k0V/o1d8do9WL7NqtiEk/EuMr5pkPOmb +yUQOT9qY/B1bMk8jHllRE6E68BdhFa6pbgVrpsFehXslukN/QsgCK4yEsS22aE0R +K4TLmRaYT8JU9hlbY86dRhqNKM+m5aH4Aa1owUj6nZQq3zYgY+0SoCdlJdGSD0gG +LN5E4i2tlPvxBo/z7ObV9Cc+zI1OwcH5WvcBYINYDPLN9Fr9iZzLC0zI1H7eL0ut +0eI+4BfytTk8Ff8EA2mLrfYy1fyNZqBol7lyzIg6iTBDaLzUCTECggEBAPjFmarO +dsj2KDWr1VOobwaoY54Q/AavRARnGi24QcprHLQAJsKIUbSmMoIV/qRBUhyw88VB +LCRW92kK4GJJIOwkr8l/zNIJRGRg/CK4E0S3vu4GA1x300VAole/nk/jUKQGCUL9 +27XJrrOKNz1Equn98OSeRB9j7AJ0aPy5BafI2PWoUoMPfpWfULeK8eBAxiYMewwk +VpBzJr/FoZD4QgFKXhk6PoxdqdEaRQxySRWNCwgjVsSxYKVxO47bd3lJjGgXF2sd +hfGnurBZt0jL7CmmdcI4t8+05svZrw02L+GPmp6dnpmVudlKdTbJPjRdW/SVAFuc +vwtXrbD3glY5pccCggEBAN/GpsnePTRwGKsM43RLDlKM0SGMbpAoORFNC/X6PtQ9 +nLNM+y+ZUYVT+/TZXsKY7vOKOFFxcS/vt3Sez6CH1fkiEj7eHMFadjnYQp4mDJUh +NuT3eOCCLnqImuVhyuGjb0kVo+Uw7NLKcWFshj16ybCO/3dSLAAf/QWjk9XMVarP +0GHXysqOE+6aqvVwjqgZcXrteP8z/Xrij2pVsDUn6+o7E9c/snUAxpNxO5pKuaI0 +cYpLC6YILhbwLgICo22wY4W3Cd7GNJ+F8UK9QZChnbJmrMG7gqtD2wqNKiiKue4T +n3jGG9W6DRwYsVExDsWTC8WPJ5ZULBZ8dDY5OXmPeoUCggEAXHIB0sl6tt9Sve8n +DTmQWKcGrdyd61YCLqipv8ezGyeGuRU9UhkaU8lXB6Roxl1HyEWxsOGxJ6fxtOVH +0P5f76EKehS15m9vLOYljDlfX6/wkb9GTHxy1E9ahMU+bW2JsApWMsDnfrx94VZB +hNEZum6VsD9oDUoykA72XMPc6CbpCREN6Io/fhaABlTp4W3wtH760t5GFNPV2Hn2 +ukqnLJeYNEPCrqK30m6yrhdiNVH+gX2wZtOLmK9ldIb19Opx9NRv7WxBNDYiWBpe +0/yDvE6RgCVXmSYehi5UsNIsJOQaj0r/fw92ytqyiDNsnET9QPyF74VmMS7Z6uNv +Wd9+TQKCAQBVFD0ToShaCIiIeCT+cQ7n+dwFSlQ7AN/5oPZ8NgGvRiGO1iTmSv+A +lpbD1+U8TVMESzfwVxY2qIhykXLVUO/cgcS4HFCIfvFWOs/ROxwrku5BDYnqqfQr +6EYkEhNFyJKmEdE3cWuJFSkYZl9/fnCybRvZ7OcHwSG9BB1P+xlTESHkIVxbuLsB +S9LV8E58wPexShpnxQeJshvezOdqvlvmuUFo5DHgZEQbiMClf+WmMxQ8BR5PqOqF +FBoZ75DdQmQEUbwx89/MCuvYeQY1jAzd6EWkfrtGjEz6bQNrWJsqVlGaZI/uqYcU +eJrqCKHaIncmTLA7apM8lWLFvuoIOrHVAoIBAQDzs6q8kcDRwJhvGCbCf2wGpRTZ +Jza0O/toOSgI7fqrOopub/XZ/mNCsa/fFskbkJJwBj34Mt3wb59P9zFVIRyX+amh +YREfbwk/PF8fEW6OH0FFtP6Y5D0K8MV/0qls7lVTPXeKT7UXA1nQHbBG0Bb1G2Kq +hhovPyYZzh0xOmiXC0z8pBn1BLMnbC7/PUkDiVBmxJYAilD11xJDx+LLEuCzC8vL 
+J2FMsw2T/L4egMn2Ae17AuVXOxgTkkfIf1NI4AtoznnFmhtG4+ztFyoOAOUUDE2L +8kjUZ1BV35VkZFSz5NPbdelAp4HzkhdbUvfA9MotTtYtN8mgTPJ2neq8Lx4Z +-----END RSA PRIVATE KEY-----`)) + return block.Bytes +} diff --git a/agent/internal/pkg/authen/utility/crypto/aead.go b/agent/internal/pkg/authen/utility/crypto/aead.go new file mode 100644 index 0000000..1d3c938 --- /dev/null +++ b/agent/internal/pkg/authen/utility/crypto/aead.go @@ -0,0 +1,83 @@ +package crypto + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha512" + "golang.org/x/crypto/pbkdf2" + "io" +) + +func NewAEADCryptor(c config) *aead { + return &aead{ + secret: c.Secret(), + } +} + +type aead struct { + secret []byte +} + +func (a aead) Encrypt(content []byte) (encrypted []byte, err error) { + var salt []byte + if salt, err = randBytes(64); err != nil { + return + } + var block cipher.Block + if block, err = aes.NewCipher(a.key(salt)); err != nil { + return + } + var iv []byte + if iv, err = randBytes(12); err != nil { + return + } + if _, err = io.ReadFull(rand.Reader, iv); err != nil { + return + } + var aes256gcm cipher.AEAD + if aes256gcm, err = cipher.NewGCM(block); err != nil { + return + } + withTag := aes256gcm.Seal(nil, iv, content, nil) + encryptedLen := len(withTag) - 16 + withoutTag := withTag[:encryptedLen] + tag := withTag[encryptedLen:] + encrypted = bytes.Join([][]byte{salt, iv, tag, withoutTag}, []byte{}) + return +} + +func (a aead) Decrypt(encrypted []byte) (content []byte, err error) { + if len(encrypted) <= 92 { + err = Undecryptable + return + } + salt := encrypted[:64] + iv := encrypted[64:76] + tag := encrypted[76:92] + withoutTag := encrypted[92:] + withTag := bytes.Join([][]byte{withoutTag, tag}, []byte{}) + var block cipher.Block + if block, err = aes.NewCipher(a.key(salt)); err != nil { + return + } + var aes256gcm cipher.AEAD + if aes256gcm, err = cipher.NewGCM(block); err != nil { + return + } + content, err = aes256gcm.Open(nil, iv, withTag, nil) + return +} + +func (a aead) key(salt []byte) []byte { + return pbkdf2.Key(a.secret, salt, 2145, 32, sha512.New) +} + +func randBytes(length int) (bytes []byte, err error) { + bytes = make([]byte, length) + if _, err = io.ReadFull(rand.Reader, bytes); err != nil { + return + } + return +} diff --git a/agent/internal/pkg/authen/utility/crypto/cipher.go b/agent/internal/pkg/authen/utility/crypto/cipher.go new file mode 100644 index 0000000..663b81e --- /dev/null +++ b/agent/internal/pkg/authen/utility/crypto/cipher.go @@ -0,0 +1,69 @@ +package crypto + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "encoding/base64" +) + +var ( + seckey = []byte("Q1A2Z3X!D^R$T&G*B(N)U088") //24Byte + base64Tables = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" + iv = []byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1} +) + +func base64Decode(src string) ([]byte, error) { + coder := base64.NewEncoding(base64Tables) + return coder.DecodeString(src) +} + +func aesPadding(src []byte, blockSize int) []byte { + padding := blockSize - len(src)%blockSize + padtext := bytes.Repeat([]byte{byte(padding)}, padding) + return append(src, padtext...) 
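// Usage sketch for the AEAD cryptor defined in aead.go above: Encrypt and
// Decrypt should round-trip. cfg stands for any value implementing the
// package's config interface (Secret() []byte), for example the
// configuration wired up in authen/root.go; error handling is abbreviated.
func aeadRoundTrip(cfg config, plaintext []byte) ([]byte, error) {
	c := NewAEADCryptor(cfg)
	enc, err := c.Encrypt(plaintext)
	if err != nil {
		return nil, err
	}
	return c.Decrypt(enc)
}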
+} + +func aesUnPadding(src []byte) []byte { + length := len(src) + unpadding := int(src[length-1]) + return src[:(length - unpadding)] +} +func Encrypt(src string) (string, error) { + block, err := aes.NewCipher(seckey) + if err != nil { + return "", nil + } + + arr := aesPadding([]byte(src), aes.BlockSize) + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(arr, arr) + return string(arr), nil +} +func Decrypt(src string) (string, error) { + block, err := aes.NewCipher(seckey) + if err != nil { + return "", nil + } + arr := []byte(src) + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(arr, arr) + arr = aesUnPadding(arr) + return string(arr), nil + +} + +func Base64AndDecrypt(src string) (string, error) { + block, err := aes.NewCipher(seckey) + if err != nil { + return "", nil + } + arr, err := base64Decode(src) + if err != nil { + return "", err + } + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(arr, arr) + arr = aesUnPadding(arr) + return string(arr), nil +} diff --git a/agent/internal/pkg/authen/utility/crypto/common.go b/agent/internal/pkg/authen/utility/crypto/common.go new file mode 100644 index 0000000..01170c1 --- /dev/null +++ b/agent/internal/pkg/authen/utility/crypto/common.go @@ -0,0 +1,35 @@ +package crypto + +import ( + "errors" + "hash" +) + +var Undecryptable = errors.New("undecryptable input") + +type config interface { + Secret() []byte +} + +// https://nodejs.org/api/crypto.html#crypto_crypto_createdecipher_algorithm_password_options +// openssl/evp.h - EVP_BytesToKey +func bytesToKey(hashAlgorithm func() hash.Hash, secret, salt []byte, iteration int, keySize, ivSize int) (key, iv []byte) { + h := hashAlgorithm() + var d, result []byte + sum := make([]byte, 0, h.Size()) + for len(result) < keySize+ivSize { + h.Reset() + h.Write(d) + h.Write(secret) + h.Write(salt) + sum = h.Sum(sum[:0]) + for j := 1; j < iteration; j++ { + h.Reset() + h.Write(sum) + sum = h.Sum(sum[:0]) + } + d = append(d[:0], sum...) + result = append(result, d...) 
+ } + return result[:keySize], result[keySize : keySize+ivSize] +} diff --git a/agent/internal/pkg/authen/utility/crypto/des.go b/agent/internal/pkg/authen/utility/crypto/des.go new file mode 100644 index 0000000..2281183 --- /dev/null +++ b/agent/internal/pkg/authen/utility/crypto/des.go @@ -0,0 +1,87 @@ +package crypto + +import ( + "crypto/cipher" + "crypto/des" + "crypto/md5" +) + +func NewDESCryptor(c config) *desEDE3CBC { + k, iv := bytesToKey(md5.New, c.Secret(), []byte{}, 1, 24, 8) + return &desEDE3CBC{ + key: k, + iv: iv, + } +} + +type desEDE3CBC struct { + key []byte + iv []byte +} + +func (d desEDE3CBC) Encrypt(content []byte) (encrypted []byte, err error) { + var block cipher.Block + if block, err = des.NewTripleDESCipher(d.key); err != nil { + return + } + cbc := cipher.NewCBCEncrypter(block, d.iv) + encrypted, err = d.crypt(content, cbc) + return +} + +func (d desEDE3CBC) Decrypt(encrypted []byte) (content []byte, err error) { + var block cipher.Block + if block, err = des.NewTripleDESCipher(d.key); err != nil { + return + } + cbc := cipher.NewCBCDecrypter(block, d.iv) + content, err = d.crypt(encrypted, cbc) + return +} + +func (d desEDE3CBC) crypt(in []byte, cbc cipher.BlockMode) (out []byte, err error) { + blockSize := cbc.BlockSize() + padded := padding(in, blockSize) + entriesCount := len(padded) + crypted := make([][]byte, entriesCount) + for i := 0; i < entriesCount; i++ { + crypted[i] = make([]byte, blockSize) + cbc.CryptBlocks(crypted[i], padded[i]) + } + contentSize := len(in) + out = make([]byte, contentSize) + for i := 0; i < entriesCount; i++ { + for j := 0; j < blockSize; j++ { + index := i*blockSize + j + if index >= contentSize { + break + } + out[index] = crypted[i][j] + } + } + return +} + +func padding(content []byte, blockSize int) (padded [][]byte) { + contentLength := len(content) + size := contentLength / blockSize + if contentLength%blockSize != 0 { + size++ + } + padded = make([][]byte, size) + for i := 0; i < size; i++ { + padded[i] = make([]byte, blockSize) + entry := padded[i] + for j := 0; j < blockSize; j++ { + index := i*blockSize + j + if index >= contentLength { + for k := j; k < blockSize; k++ { + entry[k] = 0 + } + break + } + entry[j] = content[index] + } + } + return +} diff --git a/agent/internal/pkg/authen/utility/jwt/jwt.go b/agent/internal/pkg/authen/utility/jwt/jwt.go new file mode 100644 index 0000000..b9a357d --- /dev/null +++ b/agent/internal/pkg/authen/utility/jwt/jwt.go @@ -0,0 +1,181 @@ +package jwt + +import ( + "encoding/base64" + "errors" + "github.com/dgrijalva/jwt-go" + "strings" + "time" +) + +var ( + UnexpectedSingingMethod = errors.New("unexpected signing method") + UnknownEntity = errors.New("unknown entity") +) + +func NewJWTAuthorizer(c config, cryptor cryptor) *authorizer { + tl := time.Duration(c.JWTTokenLife()) + return &authorizer{ + tokenLifeInHour: tl, + tokenLifeInSecond: int(time.Hour * tl / time.Second), + issuer: c.JWTIssuer(), + cert: c.JWTCert(), + cryptor: cryptor, + } +} + +type config interface { + JWTCert() []byte + JWTTokenLife() int + JWTIssuer() string +} + +type loginInfo interface { + GetUserID() int + GetUserName() string + GetUserToken() string +} + +type authorizer struct { + tokenLifeInHour time.Duration + tokenLifeInSecond int + issuer string + cert []byte + cryptor cryptor +} + +type entity struct { + Encrypted string `json:"encrypted"` + jwt.StandardClaims `json:",inline"` +} + +func (a authorizer) Authorize(li loginInfo) (token string, err error) { + i := &info{ + Username: 
li.GetUserName(), + Token: li.GetUserToken(), + UserID: li.GetUserID(), + } + now := time.Now() + var encrypted string + if encrypted, err = encryptLoginInfo(i, a.cryptor); err != nil { + return + } + claims := jwt.NewWithClaims(jwt.SigningMethodHS512, &entity{ + Encrypted: encrypted, + StandardClaims: jwt.StandardClaims{ + ExpiresAt: now.Add(a.tokenLifeInHour * time.Hour).Unix(), + Issuer: a.issuer, + }, + }) + var rawToken string + if rawToken, err = claims.SignedString(a.cert); err != nil { + return + } + token = (&dto{ + Token: rawToken, + ExpiresIn: a.tokenLifeInSecond, + IssuedAt: now, + UserID: i.UserID, + }).String() + return +} + +func (a authorizer) Validate(token string) (l loginInfo, err error) { + var t *jwt.Token + if t, err = jwt.ParseWithClaims(token, &entity{}, a.validateCert); err != nil { + return + } + obj, ok := t.Claims.(*entity) + if !ok { + err = UnknownEntity + return + } + var li *info + if li, err = decryptLoginInfo(obj.Encrypted, a.cryptor); err != nil { + return + } + l = li + return +} + +const ( + bearerKeyword = "Bearer" + bearerKeywordLength = len(bearerKeyword) +) + +func (authorizer) IsBearerToken(rawToken string) (ok bool, token string) { + ok = len(rawToken) > bearerKeywordLength && + strings.EqualFold(bearerKeyword, rawToken[:bearerKeywordLength]) + if ok { + token = strings.TrimSpace(rawToken[bearerKeywordLength:]) + } + return +} + +func (authorizer) IsTokenExpired(err error) bool { + if jve, ok := err.(*jwt.ValidationError); ok && jve.Inner != nil { + return jve.Errors&jwt.ValidationErrorExpired == jwt.ValidationErrorExpired + } + return false +} + +func (a authorizer) validateCert(token *jwt.Token) (obj interface{}, err error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + err = UnexpectedSingingMethod + return + } + return a.cert, nil +} + +var ( + secret = loadSecret() +) + +func loadSecret() []byte { + block, _ := base64.StdEncoding.DecodeString(`IUkVLcM72OGEOJgQX3BXjLP4dsLs084joI2aQWcmB8rLxPyEX2cxLdVllRrx/v/Tzh2iSPsftmcABQ+q5kVYdQ==`) + return block +} + +type LoginInfo struct { + Username string `json:"username"` + Encrypted string `json:"encrypted"` +} + +type tokenEntity struct { + Encrypted string `json:"encrypted"` + Username string `json:"login_loginname"` + Role int32 `json:"role"` + ApiToken string `json:"api_token"` + LoginAppId string `json:"login_app_id"` + LoginUid string `json:"login_uid"` + LoginAccountId string `json:"login_account_id"` + ClientIp string `json:"client_ip"` + LoginAccountName string `json:"login_account_name"` + UsersAppId string `json:"users_app_id"` + LoginUname string `json:"login_uname"` + jwt.StandardClaims +} + +func (authorizer) ValidateToken(rawToken string) (loginInfo *LoginInfo, err error) { + var token *jwt.Token + if token, err = jwt.ParseWithClaims(rawToken, &tokenEntity{}, validateSecret); err != nil { + return nil, err + } + obj, ok := token.Claims.(*tokenEntity) + if !ok { + return nil, UnknownEntity + } + loginInfo = &LoginInfo{ + Username: obj.Username, + Encrypted: obj.Encrypted, + } + return +} + +func validateSecret(token *jwt.Token) (obj interface{}, err error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + err = UnexpectedSingingMethod + return + } + return secret, nil +} diff --git a/agent/internal/pkg/authen/utility/jwt/login_info.go b/agent/internal/pkg/authen/utility/jwt/login_info.go new file mode 100644 index 0000000..79b9a19 --- /dev/null +++ b/agent/internal/pkg/authen/utility/jwt/login_info.go @@ -0,0 +1,71 @@ +package jwt + +import ( + 
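// Usage sketch for the authorizer above. Authorize returns the JSON dto
// (token, expiresIn, issuedAt, userID) as a string, so the raw JWT has to
// be pulled back out of that JSON before it can be passed to Validate. The
// loginInfo argument is any value implementing GetUserID, GetUserName and
// GetUserToken; error handling is abbreviated.
func issueAndValidate(a *authorizer, li loginInfo) (loginInfo, error) {
	wrapped, err := a.Authorize(li)
	if err != nil {
		return nil, err
	}
	var d dto
	if err := json.Unmarshal([]byte(wrapped), &d); err != nil {
		return nil, err
	}
	return a.Validate(d.Token)
}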
"encoding/base64" + "encoding/json" + "time" +) + +type dto struct { + Token string `json:"token"` + ExpiresIn int `json:"expiresIn"` + IssuedAt time.Time `json:"issuedAt"` + UserID int `json:"userID"` +} + +func (t dto) String() string { + c, _ := json.Marshal(t) + return string(c) +} + +type cryptor interface { + Encrypt(content []byte) (encrypted []byte, err error) + Decrypt(encrypted []byte) (content []byte, err error) +} + +type info struct { + Username string `json:"username"` + Token string `json:"token"` + UserID int `json:"userId"` +} + +func (i info) GetUserID() int { + return i.UserID +} + +func (i info) GetUserName() string { + return i.Username +} + +func (i info) GetUserToken() string { + return i.Token +} + +func encryptLoginInfo(i *info, cryptor cryptor) (encrypted string, err error) { + var buffer []byte + if buffer, err = json.Marshal(i); err != nil { + return + } + if buffer, err = cryptor.Encrypt(buffer); err != nil { + return + } + encrypted = base64.StdEncoding.EncodeToString(buffer) + return +} + +func decryptLoginInfo(encrypted string, cryptor cryptor) (decrypted *info, err error) { + var buffer []byte + buffer, err = base64.StdEncoding.DecodeString(encrypted) + if err != nil { + return + } + if buffer, err = cryptor.Decrypt(buffer); err != nil { + return + } + var t info + if err = json.Unmarshal(buffer, &t); err != nil { + return + } + decrypted = &t + return +} diff --git a/agent/internal/pkg/authen/utility/lazy.go b/agent/internal/pkg/authen/utility/lazy.go new file mode 100644 index 0000000..af69fce --- /dev/null +++ b/agent/internal/pkg/authen/utility/lazy.go @@ -0,0 +1,55 @@ +package utility + +import ( + "errors" + "reflect" + "sync" +) + +type Lazy interface { + Value(value interface{}) (err error) +} + +var ( + TypeMismatched = errors.New("type mismatched") +) + +type lazy struct { + lock *sync.Once + fetch func() (value interface{}, err error) + value interface{} + done bool +} + +func (l *lazy) Value(value interface{}) (err error) { + if l.done { + l.setValueTo(value) + return + } + l.lock.Do(func() { + if l.value, err = l.fetch(); err != nil { + l.lock = new(sync.Once) + return + } else { + l.setValueTo(value) + l.done = true + } + }) + return +} + +func (l lazy) setValueTo(receiver interface{}) (err error) { + rv := reflect.Indirect(reflect.ValueOf(receiver)) + vv := reflect.ValueOf(l.value) + if vv.Type().AssignableTo(rv.Type()) && rv.CanSet() { + rv.Set(reflect.ValueOf(l.value)) + } + return TypeMismatched +} + +func NewLazy(fetch func() (value interface{}, err error)) Lazy { + return &lazy{ + lock: new(sync.Once), + fetch: fetch, + } +} diff --git a/agent/internal/pkg/authen/utility/lazy_test.go b/agent/internal/pkg/authen/utility/lazy_test.go new file mode 100644 index 0000000..e93917b --- /dev/null +++ b/agent/internal/pkg/authen/utility/lazy_test.go @@ -0,0 +1,23 @@ +package utility + +import "testing" + +func TestLazy_Value(t *testing.T) { + i := 0 + someLazyValue := NewLazy(func() (value interface{}, err error) { + value = "some lazy value" + i++ + return + }) + var theValueOfSomeLazyValue string + if err := someLazyValue.Value(&theValueOfSomeLazyValue); err != nil { + t.Fatalf("get value failed, %s", err) + } + t.Logf("value of lazy: %s", theValueOfSomeLazyValue) + if err := someLazyValue.Value(&theValueOfSomeLazyValue); err != nil { + t.Fatalf("get value failed, %s", err) + } + if i > 1 { + t.Fatalf("lazy should only evaluate once, but %d times", i) + } +} diff --git a/agent/internal/pkg/authen/utility/must.go 
b/agent/internal/pkg/authen/utility/must.go new file mode 100644 index 0000000..7da8f67 --- /dev/null +++ b/agent/internal/pkg/authen/utility/must.go @@ -0,0 +1,38 @@ +package utility + +import "reflect" + +func Must(err error) { + if err != nil { + panic(err) + } +} + +type receiver interface { + Value(interface{}) +} + +type holder struct { + value interface{} +} + +func (h holder) Value(receiver interface{}) { + reflect.Indirect(reflect.ValueOf(receiver)).Set(reflect.ValueOf(h.value)) +} + +// So everyone who may use this, must be familiar with golang. +// Then they should take care with the 'receiver'. +// func someMethod(someArg string, otherArg int) (returnValue int, err error) +// for instance a method like above, then MustGet can used like below +// var returnValue int +// MustGet(someMethod("someArg", /* otherArg */ 888)).Value(&returnValue) +// and if someMethod's second return value (the err) was not nil +// the expression panic, other way the 'returnValue' should be the first +// return value of someMethod. +// YOU SHOULD take care of the receiver type yourself, if the 'var returnValue int' +// line goes to 'var returnValue string', then it also panic, since the first +// return value of someMethod is int not string. +func MustGet(anything interface{}, err error) receiver { + Must(err) + return &holder{value: anything} +} diff --git a/agent/internal/pkg/authen/utility/must_test.go b/agent/internal/pkg/authen/utility/must_test.go new file mode 100644 index 0000000..0ccc39a --- /dev/null +++ b/agent/internal/pkg/authen/utility/must_test.go @@ -0,0 +1,57 @@ +package utility + +import ( + "errors" + "testing" +) + +func chaosMonkeyA(t *testing.T) error { + t.Logf("chaosMonkeyA") + return errors.New("chaosMonkeyA") +} + +func chaosMonkeyB(t *testing.T) (string, error) { + t.Logf("chaosMonkeyB") + return "chaosMonkeyB", errors.New("chaosMonkeyB") +} + +func iAmWalkingInLine(t *testing.T) (float64, error) { + t.Logf("iAmWalkingInLine") + return 88888888.88888888, nil +} + +func TestMust(t *testing.T) { + func() { + defer func() { + if err := recover(); err != nil { + t.Logf("caseA: should panic, %s", err) + } else { + t.Fatalf("caseA: should panic, but didn't") + } + }() + Must(chaosMonkeyA(t)) + }() + func() { + defer func() { + if err := recover(); err != nil { + t.Logf("caseB: should panic, %s", err) + } else { + t.Fatalf("caseB: should panic, but didn't") + } + }() + var value string + MustGet(chaosMonkeyB(t)).Value(&value) + }() + func() { + defer func() { + if err := recover(); err != nil { + t.Fatalf("caseC: should not panic, but paniced %s", err) + } else { + t.Logf("caseC: everything went fine") + } + }() + var value float64 + MustGet(iAmWalkingInLine(t)).Value(&value) + t.Logf("caseC: value, %f", value) + }() +} diff --git a/agent/internal/pkg/errors/code.go b/agent/internal/pkg/errors/code.go new file mode 100644 index 0000000..63f7ce2 --- /dev/null +++ b/agent/internal/pkg/errors/code.go @@ -0,0 +1,38 @@ +package errors + +const ( + SUCCESS = 200 + FAILURE = 600 + NotFound = 404 + InvalidParameter = 400 + ServerError = 500 + TooManyRequests = 429 + AuthorizationError = 401 + RBACError = 403 +) + +type ErrorText struct { + Language string +} + +func NewErrorText(language string) *ErrorText { + return &ErrorText{ + Language: language, + } +} + +func (et *ErrorText) Text(code int) (str string) { + var ok bool + switch et.Language { + case "zh_CN": + str, ok = zhCNText[code] + case "en": + str, ok = enUSText[code] + default: + str, ok = zhCNText[code] + } + if !ok { + return 
"unknown error" + } + return +} diff --git a/agent/internal/pkg/errors/code_test.go b/agent/internal/pkg/errors/code_test.go new file mode 100644 index 0000000..b3a89e7 --- /dev/null +++ b/agent/internal/pkg/errors/code_test.go @@ -0,0 +1,16 @@ +package errors + +import ( + "testing" +) + +func TestText(t *testing.T) { + var errorText = NewErrorText("zh_CN") + if "OK" != errorText.Text(0) { + t.Error("text 返回 msg 不是预期的") + } + + if "unknown error" != errorText.Text(1202389) { + t.Error("text 返回 msg 不是预期的") + } +} diff --git a/agent/internal/pkg/errors/en-us.go b/agent/internal/pkg/errors/en-us.go new file mode 100644 index 0000000..d82eff4 --- /dev/null +++ b/agent/internal/pkg/errors/en-us.go @@ -0,0 +1,12 @@ +package errors + +var enUSText = map[int]string{ + SUCCESS: "OK", + FAILURE: "FAIL", + NotFound: "resources not found", + ServerError: "Internal server error", + TooManyRequests: "Too many requests", + InvalidParameter: "Parameter error", + AuthorizationError: "Authorization error", + RBACError: "No access", +} diff --git a/agent/internal/pkg/errors/error.go b/agent/internal/pkg/errors/error.go new file mode 100644 index 0000000..3d7cdff --- /dev/null +++ b/agent/internal/pkg/errors/error.go @@ -0,0 +1,63 @@ +package errors + +import ( + "errors" + "fmt" + c "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" +) + +type BusinessError struct { + code int + message string + contextErr []error +} + +func (e *BusinessError) Error() string { + return fmt.Sprintf("[Code]:%d [Msg]:%s, [context error] %s", e.code, e.message, e.contextErr) +} + +func (e *BusinessError) GetCode() int { + return e.code +} + +func (e *BusinessError) GetMessage() string { + return e.message +} + +func (e *BusinessError) SetCode(code int) { + e.code = code +} + +func (e *BusinessError) SetMessage(message string) { + e.message = message +} + +func (e *BusinessError) SetContextErr(err error) { + e.contextErr = append(e.contextErr, err) +} + +func (e *BusinessError) GetContextErr() []error { + return e.contextErr +} + +// NewBusinessError Create a business error +func NewBusinessError(code int, message ...string) *BusinessError { + var msg string + if message != nil { + msg = message[0] + } else { + msg = NewErrorText(c.Config.Language).Text(code) + } + err := new(BusinessError) + err.SetCode(code) + err.SetMessage(msg) + return err +} + +func AsBusinessError(err error) (*BusinessError, error) { + var BusinessError = new(BusinessError) + if errors.As(err, &BusinessError) { + return BusinessError, nil + } + return nil, err +} diff --git a/agent/internal/pkg/errors/zh-cn.go b/agent/internal/pkg/errors/zh-cn.go new file mode 100644 index 0000000..dcaf4b9 --- /dev/null +++ b/agent/internal/pkg/errors/zh-cn.go @@ -0,0 +1,12 @@ +package errors + +var zhCNText = map[int]string{ + SUCCESS: "OK", + FAILURE: "FAIL", + NotFound: "资源不存在", + ServerError: "服务器内部错误", + TooManyRequests: "请求过多", + InvalidParameter: "参数错误", + AuthorizationError: "权限错误", + RBACError: "暂无访问权限", +} diff --git a/agent/internal/pkg/func_make/func_make.go b/agent/internal/pkg/func_make/func_make.go new file mode 100644 index 0000000..2a479b1 --- /dev/null +++ b/agent/internal/pkg/func_make/func_make.go @@ -0,0 +1,51 @@ +package func_make + +import ( + "errors" + "reflect" +) + +type FuncMap map[string]reflect.Value + +func New() FuncMap { + return make(FuncMap, 2) +} + +func (f FuncMap) Register(name string, fn any) error { + v := reflect.ValueOf(fn) + if v.Kind() != reflect.Func { + return errors.New(name + " is not a function type.") + } + f[name] = 
v + return nil +} + +func (f FuncMap) Registers(funcMap map[string]any) (err error) { + for k, v := range funcMap { + err = f.Register(k, v) + if err != nil { + break + } + } + return +} + +func (f FuncMap) Call(name string, params ...any) (result []reflect.Value, err error) { + if _, ok := f[name]; !ok { + err = errors.New(name + " method does not exist.") + return + } + in := make([]reflect.Value, len(params)) + for k, param := range params { + in[k] = reflect.ValueOf(param) + } + + defer func() { + if e := recover(); e != nil { + err = errors.New("call " + name + " method fail. " + e.(string)) + } + }() + + result = f[name].Call(in) + return +} diff --git a/agent/internal/pkg/func_make/func_make_test.go b/agent/internal/pkg/func_make/func_make_test.go new file mode 100644 index 0000000..bebaf43 --- /dev/null +++ b/agent/internal/pkg/func_make/func_make_test.go @@ -0,0 +1,45 @@ +package func_make + +import ( + "testing" +) + +var ( + funcMap = map[string]interface{}{ + "test": func(str string) string { + return str + }, + } + funcMake = New() +) + +func TestRegisters(t *testing.T) { + err := funcMake.Registers(funcMap) + if err != nil { + t.Errorf("绑定失败") + } +} + +func TestRegister(t *testing.T) { + err := funcMake.Register("test1", func(str ...string) string { + var res string + for _, v := range str { + res += v + } + return res + }) + if err != nil { + t.Errorf("绑定失败") + } +} + +func TestCall(t *testing.T) { + TestRegisters(t) + TestRegister(t) + if _, err := funcMake.Call("test", "1"); err != nil { + t.Errorf("请求test方法失败:%s", err) + } + if _, err := funcMake.Call("test1", "2323", "ddd"); err != nil { + t.Errorf("请求test1方法失败:%s", err) + } +} diff --git a/agent/internal/pkg/request/request.go b/agent/internal/pkg/request/request.go new file mode 100644 index 0000000..30d2b47 --- /dev/null +++ b/agent/internal/pkg/request/request.go @@ -0,0 +1,47 @@ +package request + +import ( + "bytes" + "errors" + "github.com/gin-gonic/gin" + "io/ioutil" + "net/http" +) + +func GetQueryParams(c *gin.Context) map[string]any { + query := c.Request.URL.Query() + var queryMap = make(map[string]any, len(query)) + for k := range query { + queryMap[k] = c.Query(k) + } + return queryMap +} + +func GetPostFormParams(c *gin.Context) (map[string]any, error) { + if err := c.Request.ParseMultipartForm(32 << 20); err != nil { + if !errors.Is(err, http.ErrNotMultipart) { + return nil, err + } + } + var postMap = make(map[string]any, len(c.Request.PostForm)) + for k, v := range c.Request.PostForm { + if len(v) > 1 { + postMap[k] = v + } else if len(v) == 1 { + postMap[k] = v[0] + } + } + + return postMap, nil +} + +func GetBody(c *gin.Context) []byte { + // 读取body数据 + body, err := c.GetRawData() + if err != nil { + return nil + } + //把读过的字节流重新放到body + c.Request.Body = ioutil.NopCloser(bytes.NewBuffer(body)) + return body +} diff --git a/agent/internal/pkg/response/response.go b/agent/internal/pkg/response/response.go new file mode 100644 index 0000000..921e0f7 --- /dev/null +++ b/agent/internal/pkg/response/response.go @@ -0,0 +1,142 @@ +package response + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/errors" + "github.com/gin-gonic/gin" + "net/http" + "time" +) + +type result struct { + Code int `json:"code"` + Msg string `json:"msg"` + Data interface{} `json:"data"` + Cost string `json:"cost"` +} + +type Response struct { + httpCode int + result *result +} + +func Resp() *Response { + // 初始化response + return &Response{ + 
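// Note on FuncMap.Call above: the deferred recover assumes the panic value
// is a string (e.(string)); a panic carrying any other type, for example a
// runtime.Error raised inside the called function, would panic again inside
// the deferred function. Formatting the recovered value instead avoids
// that; a sketch of the idea as a reusable helper (not part of the
// original code):
func recoverToError(name string, errp *error) {
	if e := recover(); e != nil {
		*errp = fmt.Errorf("call %s method fail: %v", name, e)
	}
}

// Call could then use: defer recoverToError(name, &err)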
httpCode: http.StatusOK, + result: &result{ + Code: 0, + Msg: "", + Data: nil, + Cost: "", + }, + } +} + +// Fail 错误返回 +func (r *Response) Fail(c *gin.Context, code int, msg string, data ...any) { + r.SetCode(code) + r.SetMessage(msg) + if data != nil { + r.WithData(data[0]) + } + r.json(c) +} + +// FailCode 自定义错误码返回 +func (r *Response) FailCode(c *gin.Context, code int, msg ...string) { + r.SetCode(code) + if msg != nil { + r.SetMessage(msg[0]) + } + r.json(c) +} + +// Success 正确返回 +func (r *Response) Success(c *gin.Context) { + r.SetCode(errors.SUCCESS) + r.json(c) +} + +// WithDataSuccess 成功后需要返回值 +func (r *Response) WithDataSuccess(c *gin.Context, data interface{}) { + r.SetCode(errors.SUCCESS) + r.WithData(data) + r.json(c) +} + +// SetCode 设置返回code码 +func (r *Response) SetCode(code int) *Response { + r.result.Code = code + return r +} + +// SetHttpCode 设置http状态码 +func (r *Response) SetHttpCode(code int) *Response { + r.httpCode = code + return r +} + +type defaultRes struct { + Result any `json:"result"` +} + +// WithData 设置返回data数据 +func (r *Response) WithData(data any) *Response { + switch data.(type) { + case string, int, bool: + r.result.Data = &defaultRes{Result: data} + default: + r.result.Data = data + } + return r +} + +// SetMessage 设置返回自定义错误消息 +func (r *Response) SetMessage(message string) *Response { + r.result.Msg = message + return r +} + +var ErrorText = errors.NewErrorText(config.Config.Language) + +// json 返回 gin 框架的 HandlerFunc +func (r *Response) json(c *gin.Context) { + if r.result.Msg == "" { + r.result.Msg = ErrorText.Text(r.result.Code) + } + + // if r.Data == nil { + // r.Data = struct{}{} + // } + + r.result.Cost = time.Since(c.GetTime("requestStartTime")).String() + c.AbortWithStatusJSON(r.httpCode, r.result) +} + +// Success 业务成功响应 +func Success(c *gin.Context, data ...any) { + if data != nil { + Resp().WithDataSuccess(c, data[0]) + return + } + Resp().Success(c) +} + +// FailCode 业务失败响应 +func FailCode(c *gin.Context, code int, data ...any) { + if data != nil { + Resp().WithData(data[0]).FailCode(c, code) + return + } + Resp().FailCode(c, code) +} + +// Fail 业务失败响应 +func Fail(c *gin.Context, code int, message string, data ...any) { + if data != nil { + Resp().WithData(data[0]).FailCode(c, code, message) + return + } + Resp().FailCode(c, code, message) +} diff --git a/agent/internal/pkg/utils/utils.go b/agent/internal/pkg/utils/utils.go new file mode 100644 index 0000000..76427d2 --- /dev/null +++ b/agent/internal/pkg/utils/utils.go @@ -0,0 +1,56 @@ +package utils + +import ( + "database/sql/driver" + "fmt" + "strings" + "time" +) + +type FormatDate struct { + time.Time +} + +const ( + timeFormat = "2006-01-02 15:04:05" +) + +func (t FormatDate) MarshalJSON() ([]byte, error) { + if &t == nil || t.IsZero() { + return []byte("null"), nil + } + return []byte(fmt.Sprintf("\"%s\"", t.Format(timeFormat))), nil +} + +func (t FormatDate) Value() (driver.Value, error) { + var zeroTime time.Time + if t.Time.UnixNano() == zeroTime.UnixNano() { + return nil, nil + } + return t.Time, nil +} + +func (t *FormatDate) Scan(v interface{}) error { + if value, ok := v.(time.Time); ok { + *t = FormatDate{value} + return nil + } + return fmt.Errorf("can not convert %v to timestamp", v) +} + +func (t *FormatDate) String() string { + if t == nil || t.IsZero() { + return "" + } + return fmt.Sprintf("%s", t.Time.Format(timeFormat)) +} + +func (t *FormatDate) UnmarshalJSON(data []byte) error { + str := string(data) + if str == "null" { + return nil + } + t1, err := 
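// Usage sketch for the response helpers above inside a gin handler. The
// route and payload are placeholders; the Cost field relies on the
// requestStartTime value that the request-cost middleware is expected to
// have set on the context.
func pingHandler(c *gin.Context) {
	response.Success(c, "pong")
	// failure path, for comparison:
	// response.FailCode(c, errors.InvalidParameter)
}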
time.ParseInLocation(timeFormat, strings.Trim(str, "\""), time.Local)
+	if err != nil {
+		return err
+	}
+	*t = FormatDate{t1}
+	return nil
+}
diff --git a/agent/internal/routers/api_router.go b/agent/internal/routers/api_router.go
new file mode 100644
index 0000000..cb39aee
--- /dev/null
+++ b/agent/internal/routers/api_router.go
@@ -0,0 +1,30 @@
+package routers
+
+import (
+	controllerV1 "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/controller/v1"
+	"github.com/gin-gonic/gin"
+)
+
+// setApiRoute registers the versioned API routes on the given engine.
+func setApiRoute(r *gin.Engine) {
+	// version 1
+	v1 := r.Group("/api/v1")
+	{
+		//v1.POST("/login", controllerV1.Login)
+		//v1.GET("/host/info", controllerV1.GetAssertInfo)
+
+		v1.POST("/asset", controllerV1.AssetInformationEntry)
+		v1.POST("/shutdown", controllerV1.Shutdown)
+
+		v1.GET("/host/compose", controllerV1.GetComponentInfo)
+
+		v1.GET("/log/agent", controllerV1.GetAgentLog)
+		v1.GET("/log/agent/upgrade", controllerV1.GetAgentUpgradeLog)
+		v1.GET("/log/telegraf", controllerV1.GetTelegrafLog)
+
+		v1.POST("/telegraf/config", controllerV1.UpdateTelegrafConfig)
+		v1.POST("/telegraf/config/recover", controllerV1.RecoverConfig)
+
+		v1.GET("/agent/download", controllerV1.DownloadAgent)
+	}
+}
diff --git a/agent/internal/routers/router.go b/agent/internal/routers/router.go
new file mode 100644
index 0000000..9eac789
--- /dev/null
+++ b/agent/internal/routers/router.go
@@ -0,0 +1,75 @@
+package routers
+
+import (
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config"
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/middleware"
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/errors"
+	response2 "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/response"
+	"github.com/gin-gonic/gin"
+	"io"
+	"net/http"
+)
+
+// SetRouters builds the gin engine, installs the middleware chain and
+// registers all routes.
+func SetRouters() *gin.Engine {
+	var r *gin.Engine
+
+	if !config.Config.Debug {
+		// production mode
+		r = ReleaseRouter()
+		r.Use(
+			middleware.AuthenticationHandler(),
+			middleware.RequestCostHandler(),
+			middleware.CustomLogger(),
+			middleware.CustomRecovery(),
+			middleware.CorsHandler(),
+			middleware.AppContextHandler(),
+			middleware.AuthorizationHandler(),
+		)
+	} else {
+		// development / debug mode
+		r = gin.New()
+		r.Use(
+			//gin.Logger(),
+			middleware.AuthenticationHandler(),
+			middleware.RequestCostHandler(),
+			middleware.CustomLogger(),
+			middleware.CustomRecovery(),
+			middleware.CorsHandler(),
+			middleware.AppContextHandler(),
+			middleware.AuthorizationHandler(),
+		)
+	}
+	//// set up trusted proxies
+	//err := r.SetTrustedProxies([]string{"127.0.0.1"})
+	//if err != nil {
+	//	panic(err)
+	//}
+
+	// ping
+	//r.Any("/ping", func(c *gin.Context) {
+	//	c.AbortWithStatusJSON(http.StatusOK, gin.H{
+	//		"message": "pong!",
+	//	})
+	//})
+
+	// register the API routes
+	setApiRoute(r)
+
+	r.NoRoute(func(c *gin.Context) {
+		response2.Resp().SetHttpCode(http.StatusNotFound).FailCode(c, errors.NotFound)
+	})
+
+	return r
+}
+
+// ReleaseRouter switches gin into release mode, as recommended for production.
+func ReleaseRouter() *gin.Engine {
+	// switch to release (production) mode
+	gin.SetMode(gin.ReleaseMode)
+	// silence gin's per-request access log output
+	gin.DefaultWriter = io.Discard
+
+	engine := gin.New()
+
+	return engine
+}
diff --git a/agent/internal/service/component/component.go b/agent/internal/service/component/component.go
new file mode 100644
index 0000000..694ea29
--- /dev/null
+++ b/agent/internal/service/component/component.go
@@ -0,0 +1,152 @@
+package component
+
+import (
+	"errors"
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils"
+	"strings"
+
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/model/component"
+
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo"
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types"
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log"
+
+	telegrafConfig "github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/plugins/outputs/http"
+	"github.com/influxdata/telegraf/plugins/outputs/prometheus_client"
+
+	_ "github.com/influxdata/telegraf/plugins/parsers/prometheus"
+	_ "github.com/influxdata/telegraf/plugins/serializers/json"
+	_ "github.com/influxdata/telegraf/plugins/serializers/prometheus"
+	_ "github.com/influxdata/telegraf/plugins/serializers/prometheusremotewrite"
+
+	_ "github.com/influxdata/telegraf/plugins/inputs/cpu"    // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/disk"   // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/diskio" // register plugin
+	//_ "github.com/influxdata/telegraf/plugins/inputs/all"
+	_ "github.com/influxdata/telegraf/plugins/inputs/http"          // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/kernel"        // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/linux_cpu"     // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/mem"           // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/nats"          // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/net"           // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/netstat"       // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/nstat"         // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/processes"     // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/swap"          // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"       // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/system"        // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/systemd_units" // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/win_eventlog"  // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/win_services"  // register plugin
+	_ "github.com/influxdata/telegraf/plugins/inputs/wireless"      // register plugin
+)
+
+// GetComInfo reports the status of the managed collector components
+// (currently only telegraf), together with the inputs, outputs and tags
+// parsed from the local telegraf configuration file.
+func GetComInfo() ([]component.Component, error) {
+
+	var telegrafCom component.Component
+	telegrafCom.COMName = "telegraf"
+	telegrafCom.COMPort = 0
+	telegrafCom.COMType = "telegraf"
+	telegrafCom.COMVersion = "1.28.2"
+
+	info, err := GetTelegrafProcessInfo()
+	if err != nil {
+		telegrafCom.Status = "0"
+	} else {
+		telegrafCom.LastStarttime = info.StartTime
+		telegrafCom.Status = "1"
+	}
+
+	runDirectory, _ := utils.GetCurrentPath()
+	conf := runDirectory + "/data-collector/telegraf/conf/telegraf.conf"
+
+	c := telegrafConfig.NewConfig()
+	err = c.LoadConfig(conf)
+	if err != nil {
+		log.Errorf("failed to load telegraf config: %s", err)
+		log.Errorf("telegraf config file: %s", conf)
+	} else {
+		inputs := c.Inputs
+		for _, input := range inputs {
+			telegrafCom.Config.Metrics = append(telegrafCom.Config.Metrics, input.Config.Name)
+		}
+
+		// map each configured output to a metric server entry: prometheus_client
+		// exposes a listen address, the http output pushes to a URL.
+		outputs := c.Outputs
+		for _, output := range outputs {
+
+			prometheusClient, ok := output.Output.(*prometheus_client.PrometheusClient)
+
+			if ok {
+				ms1 := component.MetricServer{
+					Name:    output.Config.Name,
+					Address: prometheusClient.Listen + prometheusClient.Path,
+				}
+				telegrafCom.Config.MetricServer = append(telegrafCom.Config.MetricServer, ms1)
+			} else {
+				
httpClient, ok := output.Output.(*http.HTTP)
+				if ok {
+					ms2 := component.MetricServer{
+						Name:    output.Config.Name,
+						Address: httpClient.URL,
+					}
+					telegrafCom.Config.MetricServer = append(telegrafCom.Config.MetricServer, ms2)
+				}
+			}
+
+		}
+
+		telegrafCom.Config.Tags = c.Tags
+	}
+
+	data := make([]component.Component, 1)
+	data[0] = telegrafCom
+	return data, nil
+}
+
+// GetTelegrafProcessInfo returns the process information of a running
+// telegraf process, or an error if no such process is found.
+func GetTelegrafProcessInfo() (*types.ProcessInfo, error) {
+	processes, err := sysinfo.Processes()
+	if err != nil {
+		log.Errorf("heartbeat check failed: unable to list system processes")
+		return nil, err
+	}
+
+	for _, process := range processes {
+		info, err := process.Info()
+
+		if err != nil {
+			continue
+		}
+
+		if strings.Contains(info.Name, "telegraf") || strings.Contains(info.Name, "Telegraf") {
+			return &info, nil
+		}
+	}
+
+	return nil, errors.New("telegraf collector process is not running")
+}
+
+// GetProcessExporterProcessInfo returns the process information of a running
+// process-exporter process, or an error if no such process is found.
+func GetProcessExporterProcessInfo() (*types.ProcessInfo, error) {
+	processes, err := sysinfo.Processes()
+	if err != nil {
+		log.Errorf("heartbeat check failed: unable to list system processes")
+		return nil, err
+	}
+
+	for _, process := range processes {
+		info, err := process.Info()
+
+		if err != nil {
+			continue
+		}
+
+		if strings.Contains(info.Name, "process-exporter") {
+			return &info, nil
+		}
+	}
+
+	return nil, errors.New("process-exporter collector process is not running")
+}
diff --git a/agent/internal/service/component/data-collector/telegraf/conf/telegraf.conf b/agent/internal/service/component/data-collector/telegraf/conf/telegraf.conf
new file mode 100644
index 0000000..0186921
--- /dev/null
+++ b/agent/internal/service/component/data-collector/telegraf/conf/telegraf.conf
@@ -0,0 +1,12800 @@
+# Telegraf Configuration
+#
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+#
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+#
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+#
+# Environment variables can be used anywhere in this config file, simply surround
+# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
+# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
+
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
+  # rack = "1a"
+  ## Environment variables can be used as tags, and throughout the config file
+  # user = "$USER"
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will send metrics to outputs in batches of at most
+  ## metric_batch_size metrics.
+  ## This controls the size of writes that Telegraf sends to output plugins.
+  metric_batch_size = 1000
+
+  ## Maximum number of unwritten metrics per output. Increasing this value
+  ## allows for longer periods of output downtime without dropping metrics at the
+  ## cost of higher maximum memory usage.
+  metric_buffer_limit = 10000
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Collection offset is used to shift the collection by the given amount.
+  ## This can be used to avoid many plugins querying constraint devices
+  ## at the same time by manually scheduling them in time.
+  # collection_offset = "0s"
+
+  ## Default flushing interval for all outputs. Maximum flush_interval will be
+  ## flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## Collected metrics are rounded to the precision specified. Precision is
+  ## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
+  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
+  ##
+  ## By default or when set to "0s", precision will be set to the same
+  ## timestamp order as the collection interval, with the maximum being 1s:
+  ##   ie, when interval = "10s", precision will be "1s"
+  ##       when interval = "250ms", precision will be "1ms"
+  ##
+  ## Precision will NOT be used for service inputs. It is up to each individual
+  ## service input to set the timestamp at the appropriate precision.
+  precision = "0s"
+
+  ## Log at debug level.
+  # debug = false
+  ## Log only error level messages.
+  # quiet = false
+
+  ## Log target controls the destination for logs and can be one of "file",
+  ## "stderr" or, on Windows, "eventlog". When set to "file", the output file
+  ## is determined by the "logfile" setting.
+  # logtarget = "file"
+
+  ## Name of the file to be logged to when using the "file" logtarget. If set to
+  ## the empty string then logs are written to stderr.
+  # logfile = ""
+
+  ## The logfile will be rotated after the time interval specified. When set
+  ## to 0 no time based rotation is performed. Logs are rotated only when
+  ## written to, if there is no log activity rotation may be delayed.
+  # logfile_rotation_interval = "0h"
+
+  ## The logfile will be rotated when it becomes larger than the specified
+  ## size. When set to 0 no size based rotation is performed.
+  # logfile_rotation_max_size = "0MB"
+
+  ## Maximum number of rotated archives to keep, any older logs are deleted.
+  ## If set to -1, no archives are removed.
+  # logfile_rotation_max_archives = 5
+
+  ## Pick a timezone to use when logging or type 'local' for local time.
+  ## Example: America/Chicago
+  # log_with_timezone = ""
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+  ## If set to true, do not set the "host" tag in the telegraf agent.
+  omit_hostname = false
+
+  ## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which
+  ## translates by calling external programs snmptranslate and snmptable,
+  ## or "gosmi" which translates using the built-in gosmi library.
+  # snmp_translator = "netsnmp"
+
+  ## Name of the file to load the state of plugins from and store the state to.
+  ## If uncommented and not empty, this file will be used to save the state of
+  ## stateful plugins on termination of Telegraf. If the file exists on start,
+  ## the state in the file will be restored for the plugins.
+ # statefile = "" + +############################################################################### +# SECRETSTORE PLUGINS # +############################################################################### + + +# # Secret-store to access Docker Secrets +# [[secretstores.docker]] +# ## Unique identifier for the secretstore. +# ## This id can later be used in plugins to reference the secrets +# ## in this secret-store via @{:} (mandatory) +# id = "docker_secretstore" +# +# ## Default Path to directory where docker stores the secrets file +# ## Current implementation in docker compose v2 only allows the following +# ## value for the path where the secrets are mounted at runtime +# # path = "/run/secrets" +# +# ## Allow dynamic secrets that are updated during runtime of telegraf +# ## Dynamic Secrets work only with `file` or `external` configuration +# ## in `secrets` section of the `docker-compose.yml` file +# # dynamic = false + + +# # Read secrets from a HTTP endpoint +# [[secretstores.http]] +# ## Unique identifier for the secret-store. +# ## This id can later be used in plugins to reference the secrets +# ## in this secret-store via @{:} (mandatory) +# id = "secretstore" +# +# ## URLs from which to read the secrets +# url = "http://localhost/secrets" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Optional Token for Bearer Authentication via +# ## "Authorization: Bearer " header +# # token = "your-token" +# +# ## Optional Credentials for HTTP Basic Authentication +# # username = "username" +# # password = "pa$$word" +# +# ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] +# +# ## HTTP Proxy support +# # use_system_proxy = false +# # http_proxy_url = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Minimal TLS version to accept by the client +# # tls_min_version = "TLS12" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_headers = { Content-Type = "application/json", X-MY-HEADER = "hello" } +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## When unset or set to zero the authentication will only happen once +# ## and will never renew the cookie. Set to a suitable duration if you +# ## require cookie renewal! +# # cookie_auth_renewal = "0s" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## List of success status codes +# # success_status_codes = [200] +# +# ## JSONata expression to transform the server response into a +# ## { "secret name": "secret value", ... } +# ## form. See https://jsonata.org for more information and a playground. +# # transformation = '' +# +# ## Cipher used to decrypt the secrets. +# ## In case your secrets are transmitted in an encrypted form, you need +# ## to specify the cipher used and provide the corresponding configuration. 
+# ## Please refer to https://github.com/influxdata/telegraf/blob/master/plugins/secretstores/http/README.md +# ## for supported values. +# # cipher = "none" +# +# ## AES cipher parameters +# # [secretstores.http.aes] +# # ## Key (hex-encoded) and initialization-vector (IV) for the decryption. +# # ## In case the key (and IV) is derived from a password, the values can +# # ## be omitted. +# # key = "" +# # init_vector = "" +# # +# # ## Parameters for password-based-key derivation. +# # ## These parameters must match the encryption side to derive the same +# # ## key on both sides! +# # # kdf_algorithm = "PBKDF2-HMAC-SHA256" +# # # password = "" +# # # salt = "" +# # # iterations = 0 + + +# # File based Javascript Object Signing and Encryption based secret-store +# [[secretstores.jose]] +# ## Unique identifier for the secret-store. +# ## This id can later be used in plugins to reference the secrets +# ## in this secret-store via @{:} (mandatory) +# id = "secretstore" +# +# ## Directory for storing the secrets +# path = "/etc/telegraf/secrets" +# +# ## Password to access the secrets. +# ## If no password is specified here, Telegraf will prompt for it at startup time. +# # password = "" + + +# # Secret-store to retrieve and maintain tokens from various OAuth2 services +# [[secretstores.oauth2]] +# ## Unique identifier for the secret-store. +# ## This id can later be used in plugins to reference the secrets +# ## in this secret-store via @{:} (mandatory) +# id = "secretstore" +# +# ## Service to retrieve the token(s) from +# ## Currently supported services are "custom", "auth0" and "AzureAD" +# # service = "custom" +# +# ## Setting to overwrite the queried token-endpoint +# ## This setting is optional for some serices but mandatory for others such +# ## as "custom" or "auth0". Please check the documentation at +# ## https://github.com/influxdata/telegraf/blob/master/plugins/secretstores/oauth2/README.md +# # token_endpoint = "" +# +# ## Tenant ID for the AzureAD service +# # tenant_id = "" +# +# ## Minimal remaining time until the token expires +# ## If a token expires less than the set duration in the future, the token is +# ## renewed. This is useful to avoid race-condition issues where a token is +# ## still valid, but isn't when the request reaches the API endpoint of +# ## your service using the token. +# # token_expiry_margin = "1s" +# +# ## Section for defining a token secret +# [[secretstores.oauth2.token]] +# ## Unique secret-key used for referencing the token via @{:} +# key = "" +# ## Client-ID and secret for the 2-legged OAuth flow +# client_id = "" +# client_secret = "" +# ## Scopes to send in the request +# # scopes = [] +# +# ## Additional (optional) parameters to include in the token request +# ## This might for example include the "audience" parameter required for +# ## auth0. +# # [secretstores.oauth2.token.parameters] +# # audience = "" + + +# # Operating System native secret-store +# [[secretstores.os]] +# ## Unique identifier for the secret-store. +# ## This id can later be used in plugins to reference the secrets +# ## in this secret-store via @{:} (mandatory) +# id = "secretstore" +# +# ## Keyring Name & Collection +# ## * Linux: keyring name used for the secrets, collection is unused +# ## * macOS: keyring specifies the macOS' Keychain name and collection is an +# ## optional Keychain service name +# ## * Windows: keys follow a fixed pattern in the form +# ## `::`. Please keep this in mind when +# ## creating secrets with the Windows credential tool. 
+# # keyring = "telegraf" +# # collection = "" +# +# ## macOS Keychain password +# ## If no password is specified here, Telegraf will prompt for it at startup +# ## time. +# # password = "" +# +# ## Allow dynamic secrets that are updated during runtime of telegraf +# # dynamic = false + + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + + +# # Configuration for sending metrics to InfluxDB 2.0 +# [[outputs.influxdb_v2]] +# ## The URLs of the InfluxDB cluster nodes. +# ## +# ## Multiple URLs can be specified for a single cluster, only ONE of the +# ## urls will be written to each interval. +# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] +# urls = ["http://127.0.0.1:8086"] +# +# ## Token for authentication. +# token = "" +# +# ## Organization is the name of the organization you wish to write to. +# organization = "" +# +# ## Destination bucket to write into. +# bucket = "" +# +# ## The value of this tag will be used to determine the bucket. If this +# ## tag is not set the 'bucket' option is used as the default. +# # bucket_tag = "" +# +# ## If true, the bucket tag will not be added to the metric. +# # exclude_bucket_tag = false +# +# ## Timeout for HTTP messages. +# # timeout = "5s" +# +# ## Additional HTTP headers +# # http_headers = {"X-Special-Header" = "Special-Value"} +# +# ## HTTP Proxy override, if unset values the standard proxy environment +# ## variables are consulted to determine which proxy, if any, should be used. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## HTTP User-Agent +# # user_agent = "telegraf" +# +# ## Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "gzip" +# +# ## Enable or disable uint support for writing uints influxdb 2.0. +# # influx_uint_support = false +# +# ## HTTP/2 Timeouts +# ## The following values control the HTTP/2 client's timeouts. These settings +# ## are generally not required unless a user is seeing issues with client +# ## disconnects. If a user does see issues, then it is suggested to set these +# ## values to "15s" for ping timeout and "30s" for read idle timeout and +# ## retry. +# ## +# ## Note that the timer for read_idle_timeout begins at the end of the last +# ## successful write and not at the beginning of the next write. +# # ping_timeout = "0s" +# # read_idle_timeout = "0s" +# +# ## Optional TLS Config for use on HTTP connections. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Configuration for Amon Server to send metrics to. +# [[outputs.amon]] +# ## Amon Server Key +# server_key = "my-server-key" # required. +# +# ## Amon Instance URL +# amon_instance = "https://youramoninstance" # required +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Publishes metrics to an AMQP broker +# [[outputs.amqp]] +# ## Broker to publish to. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to publish to. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. 
+# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Maximum messages to send over a connection. Once this is reached, the +# ## connection is closed and a new connection is made. This can be helpful for +# ## load balancing when not using a dedicated load balancer. +# # max_messages = 0 +# +# ## Exchange to declare and publish to. +# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Metric tag to use as a routing key. +# ## ie, if this tag exists, its value will be used as the routing key +# # routing_tag = "host" +# +# ## Static routing key. Used when no routing_tag is set or as a fallback +# ## when the tag specified in routing tag is not found. +# # routing_key = "" +# # routing_key = "telegraf" +# +# ## Delivery Mode controls if a published message is persistent. +# ## One of "transient" or "persistent". +# # delivery_mode = "transient" +# +# ## InfluxDB database added as a message header. +# ## deprecated in 1.7; use the headers option +# # database = "telegraf" +# +# ## InfluxDB retention policy added as a message header +# ## deprecated in 1.7; use the headers option +# # retention_policy = "default" +# +# ## Static headers added to each published message. +# # headers = { } +# # headers = {"database" = "telegraf", "retention_policy" = "default"} +# +# ## Connection timeout. If not provided, will default to 5s. 0s means no +# ## timeout (not recommended). +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Proxy Configuration +# # use_proxy = false +# # proxy_url = "localhost:8888" +# +# ## If true use batch serialization format instead of line based delimiting. +# ## Only applies to data formats which are not line based such as JSON. +# ## Recommended to set to true. +# # use_batch_format = false +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# ## +# ## Please note that when use_batch_format = false each amqp message contains only +# ## a single metric, it is recommended to use compression with batch format +# ## for best results. +# # content_encoding = "identity" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Send metrics to Azure Application Insights +# [[outputs.application_insights]] +# ## Instrumentation key of the Application Insights resource. 
+# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx" +# +# ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints +# # endpoint_url = "https://dc.services.visualstudio.com/v2/track" +# +# ## Timeout for closing (default: 5s). +# # timeout = "5s" +# +# ## Enable additional diagnostic logging. +# # enable_diagnostic_logging = false +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Context Tag Sources add Application Insights context tags to a tag value. +# ## +# ## For list of allowed context tag keys see: +# ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go +# # [outputs.application_insights.context_tag_sources] +# # "ai.cloud.role" = "kubernetes_container_name" +# # "ai.cloud.roleInstance" = "kubernetes_pod_name" + + +# # Sends metrics to Azure Data Explorer +# [[outputs.azure_data_explorer]] +# ## The URI property of the Azure Data Explorer resource on Azure +# ## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net +# endpoint_url = "" +# +# ## The Azure Data Explorer database that the metrics will be ingested into. +# ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion. +# ## ex: "exampledatabase" +# database = "" +# +# ## Timeout for Azure Data Explorer operations +# # timeout = "20s" +# +# ## Type of metrics grouping used when pushing to Azure Data Explorer. +# ## Default is "TablePerMetric" for one table per different metric. +# ## For more information, please check the plugin README. +# # metrics_grouping_type = "TablePerMetric" +# +# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). +# # table_name = "" +# +# ## Creates tables and relevant mapping if set to true(default). +# ## Skips table and mapping creation if set to false, this is useful for running Telegraf with the lowest possible permissions i.e. table ingestor role. +# # create_tables = true +# +# ## Ingestion method to use. +# ## Available options are +# ## - managed -- streaming ingestion with fallback to batched ingestion or the "queued" method below +# ## - queued -- queue up metrics data and process sequentially +# # ingestion_type = "queued" + + +# # Send aggregate metrics to Azure Monitor +# [[outputs.azure_monitor]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Set the namespace prefix, defaults to "Telegraf/". +# # namespace_prefix = "Telegraf/" +# +# ## Azure Monitor doesn't have a string value type, so convert string +# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows +# ## a maximum of 10 dimensions so Telegraf will only send the first 10 +# ## alphanumeric dimensions. +# # strings_as_dimensions = false +# +# ## Both region and resource_id must be set or be available via the +# ## Instance Metadata service on Azure Virtual Machines. +# # +# ## Azure Region to publish metrics against. +# ## ex: region = "southcentralus" +# # region = "" +# # +# ## The Azure Resource ID against which metric will be logged, e.g. 
+# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" +# # resource_id = "" +# +# ## Optionally, if in Azure US Government, China, or other sovereign +# ## cloud environment, set the appropriate REST endpoint for receiving +# ## metrics. (Note: region may be unused in this context) +# # endpoint_url = "https://monitoring.core.usgovcloudapi.net" + + +# # Configuration for Google Cloud BigQuery to send entries +# [[outputs.bigquery]] +# ## Credentials File +# credentials_file = "/path/to/service/account/key.json" +# +# ## Google Cloud Platform Project +# project = "my-gcp-project" +# +# ## The namespace for the metric descriptor +# dataset = "telegraf" +# +# ## Timeout for BigQuery operations. +# # timeout = "5s" +# +# ## Character to replace hyphens on Metric name +# # replace_hyphen_to = "_" + + +# ## Configuration to publish Telegraf metrics to Clarify +# [[outputs.clarify]] +# ## Credentials File (Oauth 2.0 from Clarify integration) +# credentials_file = "/path/to/clarify/credentials.json" +# +# ## Clarify username password (Basic Auth from Clarify integration) +# username = "i-am-bob" +# password = "secret-password" +# +# ## Timeout for Clarify operations +# # timeout = "20s" +# +# ## Optional tags to be included when generating the unique ID for a signal in Clarify +# # id_tags = [] +# # clarify_id_tag = 'clarify_input_id' + + +# # Publish Telegraf metrics to a Google Cloud PubSub topic +# [[outputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub topic. +# project = "my-project" +# +# ## Required. Name of PubSub topic to publish metrics to. +# topic = "my-topic" +# +# ## Content encoding for message payloads, can be set to "gzip" or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. If true, will send all metrics per write in one PubSub message. +# # send_batched = true +# +# ## The following publish_* parameters specifically configures batching +# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read +# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1. +# # publish_count_threshold = 1000 +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1 +# # publish_byte_threshold = 1000000 +# +# ## Optional. Specifically configures requests made to the PubSub API. +# # publish_num_go_routines = 2 +# +# ## Optional. Specifies a timeout for requests to the PubSub API. +# # publish_timeout = "30s" +# +# ## Optional. If true, published PubSub message data will be base64-encoded. 
+# # base64_data = false +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Optional. PubSub attributes to add to metrics. +# # [outputs.cloud_pubsub.attributes] +# # my_attr = "tag_value" + + +# # Configuration for AWS CloudWatch output. +# [[outputs.cloudwatch]] +# ## Amazon REGION +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Set http_proxy +# # use_system_proxy = false +# # http_proxy_url = "http://localhost:8888" +# +# ## Namespace for the CloudWatch MetricDatums +# namespace = "InfluxData/Telegraf" +# +# ## If you have a large amount of metrics, you should consider to send statistic +# ## values instead of raw metrics which could not only improve performance but +# ## also save AWS API cost. If enable this flag, this plugin would parse the required +# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. +# ## You could use basicstats aggregator to calculate those fields. If not all statistic +# ## fields are available, all fields would still be sent as raw metrics. +# # write_statistics = false +# +# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) +# # high_resolution_metrics = false + + +# # Configuration for AWS CloudWatchLogs output. +# [[outputs.cloudwatch_logs]] +# ## The region is the Amazon region that you wish to connect to. +# ## Examples include but are not limited to: +# ## - us-west-1 +# ## - us-west-2 +# ## - us-east-1 +# ## - ap-southeast-1 +# ## - ap-southeast-2 +# ## ... +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! 
+# ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place +# log_group = "my-group-name" +# +# ## Log stream in log group +# ## Either log group name or reference to metric attribute, from which it can be parsed: +# ## tag: or field:. If log stream is not exist, it will be created. +# ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) +# ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) +# log_stream = "tag:location" +# +# ## Source of log data - metric name +# ## specify the name of the metric, from which the log data should be retrieved. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_metric_name = "docker_log" +# log_data_metric_name = "docker_log" +# +# ## Specify from which metric attribute the log data should be retrieved: +# ## tag: or field:. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_source = "field:message" +# log_data_source = "field:message" + + +# # Configuration for CrateDB to send metrics to. +# [[outputs.cratedb]] +# # A github.com/jackc/pgx/v4 connection string. +# # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig +# url = "postgres://user:password@localhost/schema?sslmode=disable" +# # Timeout for all CrateDB queries. +# timeout = "5s" +# # Name of the table to store metrics in. +# table = "metrics" +# # If true, and the metrics table does not exist, create it automatically. +# table_create = true +# # The character(s) to replace any '.' in an object key with +# key_separator = "_" + + +# # Configuration for DataDog API to send metrics to. +# [[outputs.datadog]] +# ## Datadog API key +# apikey = "my-secret-key" +# +# ## Connection timeout. +# # timeout = "5s" +# +# ## Write URL override; useful for debugging. +# # url = "https://app.datadoghq.com/api/v1/series" +# +# ## Set http_proxy +# # use_system_proxy = false +# # http_proxy_url = "http://localhost:8888" +# +# ## Override the default (none) compression used to send data. +# ## Supports: "zlib", "none" +# # compression = "none" + + +# # Send metrics to nowhere at all +# [[outputs.discard]] +# # no configuration + + +# # Send telegraf metrics to a Dynatrace environment +# [[outputs.dynatrace]] +# ## For usage with the Dynatrace OneAgent you can omit any configuration, +# ## the only requirement is that the OneAgent is running on the same host. +# ## Only setup environment url and token if you want to monitor a Host without the OneAgent present. +# ## +# ## Your Dynatrace environment URL. +# ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default) +# ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" +# ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest" +# url = "" +# +# ## Your Dynatrace API token. +# ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API +# ## The API token needs data ingest scope permission. When using OneAgent, no API token is required. 
+# api_token = "" +# +# ## Optional prefix for metric names (e.g.: "telegraf") +# prefix = "telegraf" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Optional flag for ignoring tls certificate check +# # insecure_skip_verify = false +# +# ## Connection timeout, defaults to "5s" if not set. +# timeout = "5s" +# +# ## If you want metrics to be treated and reported as delta counters, add the metric names here +# additional_counters = [ ] +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Optional dimensions to be added to every metric +# # [outputs.dynatrace.default_dimensions] +# # default_key = "default value" + + +# # Configuration for Elasticsearch to send metrics to. +# [[outputs.elasticsearch]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval +# urls = [ "http://node1.es.example.com:9200" ] # required. +# ## Elasticsearch client timeout, defaults to "5s" if not set. +# timeout = "5s" +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option +# enable_sniffer = false +# ## Set to true to enable gzip compression +# enable_gzip = false +# ## Set the interval to check if the Elasticsearch nodes are available +# ## Setting to "0s" will disable the health check (not recommended in production) +# health_check_interval = "10s" +# ## Set the timeout for periodic health checks. +# # health_check_timeout = "1s" +# ## HTTP basic authentication details. +# ## HTTP basic authentication details +# # username = "telegraf" +# # password = "mypassword" +# ## HTTP bearer token authentication details +# # auth_bearer_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9" +# +# ## Index Config +# ## The target index for metrics (Elasticsearch will create if it not exists). +# ## You can use the date specifiers below to create indexes per time frame. +# ## The metric timestamp will be used to decide the destination index name +# # %Y - year (2016) +# # %y - last two digits of year (00..99) +# # %m - month (01..12) +# # %d - day of month (e.g., 01) +# # %H - hour (00..23) +# # %V - week of the year (ISO week) (01..53) +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the index name. If the tag does not exist, +# ## the default tag value will be used. +# # index_name = "telegraf-{{host}}-%Y.%m.%d" +# # default_tag_value = "none" +# index_name = "telegraf-%Y.%m.%d" # required. +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Template Config +# ## Set to true if you want telegraf to manage its index template. 
+# ## If enabled it will create a recommended index template for telegraf indexes +# manage_template = true +# ## The template name used for telegraf indexes +# template_name = "telegraf" +# ## Set to true if you want telegraf to overwrite an existing template +# overwrite_template = false +# ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string +# ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's +# force_document_id = false +# +# ## Specifies the handling of NaN and Inf values. +# ## This option can have the following values: +# ## none -- do not modify field-values (default); will produce an error if NaNs or infs are encountered +# ## drop -- drop fields containing NaNs or infs +# ## replace -- replace with the value in "float_replacement_value" (default: 0.0) +# ## NaNs and inf will be replaced with the given number, -inf with the negative of that number +# # float_handling = "none" +# # float_replacement_value = 0.0 +# +# ## Pipeline Config +# ## To use a ingest pipeline, set this to the name of the pipeline you want to use. +# # use_pipeline = "my_pipeline" +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the pipeline name. If the tag does not exist, +# ## the default pipeline will be used as the pipeline. If no default pipeline is set, +# ## no pipeline is used for the metric. +# # use_pipeline = "{{es_pipeline}}" +# # default_pipeline = "my_pipeline" + + +# # Configuration for Event Hubs output plugin +# [[outputs.event_hubs]] +# ## The full connection string to the Event Hub (required) +# ## The shared access key must have "Send" permissions on the target Event Hub. +# connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" +# +# ## Client timeout (defaults to 30s) +# # timeout = "30s" +# +# ## Partition key +# ## Metric tag or field name to use for the event partition key. The value of +# ## this tag or field is set as the key for events if it exists. If both, tag +# ## and field, exist the tag is preferred. +# # partition_key = "" +# +# ## Set the maximum batch message size in bytes +# ## The allowable size depends on the Event Hub tier +# ## See: https://learn.microsoft.com/azure/event-hubs/event-hubs-quotas#basic-vs-standard-vs-premium-vs-dedicated-tiers +# ## Setting this to 0 means using the default size from the Azure Event Hubs Client library (1000000 bytes) +# # max_message_size = 1000000 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "json" + + +# # Send metrics to command as input over stdin +# [[outputs.exec]] +# ## Command to ingest metrics via stdin. +# command = ["tee", "-a", "/dev/null"] +# +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# +# ## Timeout for command to complete. +# # timeout = "5s" +# +# ## Data format to output. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Run executable as long-running output plugin +# [[outputs.execd]] +# ## One program to run as daemon. +# ## NOTE: process and each argument should each be their own string +# command = ["my-telegraf-output", "--some-flag", "value"] +# +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Flag to determine whether execd should throw error when part of metrics is unserializable +# ## Setting this to true will skip the unserializable metrics and process the rest of metrics +# ## Setting this to false will throw error when encountering unserializable metrics and none will be processed +# ## This setting does not apply when use_batch_format is set. +# # ignore_serialization_error = false +# +# ## Use batch serialization instead of per metric. The batch format allows for the +# ## production of batch output formats and may more efficiently encode and write metrics. +# # use_batch_format = false +# +# ## Data format to export. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf metrics to file(s) +# [[outputs.file]] +# ## Files to write to, "stdout" is a specially handled file. +# files = ["stdout", "/tmp/metrics.out"] +# +# ## Use batch serialization format instead of line based delimiting. The +# ## batch format allows for the production of non line based output formats and +# ## may more efficiently encode and write metrics. +# # use_batch_format = false +# +# ## The file will be rotated after the time interval specified. When set +# ## to 0 no time based rotation is performed. +# # rotation_interval = "0h" +# +# ## The logfile will be rotated when it becomes larger than the specified +# ## size. When set to 0 no size based rotation is performed. +# # rotation_max_size = "0MB" +# +# ## Maximum number of rotated archives to keep, any older logs are deleted. +# ## If set to -1, no archives are removed. +# # rotation_max_archives = 5 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" +# +# ## Compress output data with the specifed algorithm. +# ## If empty, compression will be disabled and files will be plain text. +# ## Supported algorithms are "zstd", "gzip" and "zlib". +# # compression_algorithm = "" +# +# ## Compression level for the algorithm above. +# ## Please note that different algorithms support different levels: +# ## zstd -- supports levels 1, 3, 7 and 11. +# ## gzip -- supports levels 0, 1 and 9. +# ## zlib -- supports levels 0, 1, and 9. +# ## By default the default compression level for each algorithm is used. +# # compression_level = -1 + + +# # Configuration for Graphite server to send metrics to +# [[outputs.graphite]] +# ## TCP endpoint for your graphite instance. 
+# ## If multiple endpoints are configured, the output will be load balanced. +# ## Only one of the endpoints will be written to with each iteration. +# servers = ["localhost:2003"] +# ## Prefix metrics name +# prefix = "" +# ## Graphite output template +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# template = "host.tags.measurement.field" +# +# ## Strict sanitization regex +# ## This is the default sanitization regex that is used on data passed to the +# ## graphite serializer. Users can add additional characters here if required. +# ## Be aware that the characters, '/' '@' '*' are always replaced with '_', +# ## '..' is replaced with '.', and '\' is removed even if added to the +# ## following regex. +# # graphite_strict_sanitize_regex = '[^a-zA-Z0-9-:._=\p{L}]' +# +# ## Enable Graphite tags support +# # graphite_tag_support = false +# +# ## Applied sanitization mode when graphite tag support is enabled. +# ## * strict - uses the regex specified above +# ## * compatible - allows for greater number of characters +# # graphite_tag_sanitize_mode = "strict" +# +# ## Character for separating metric name and field for Graphite tags +# # graphite_separator = "." +# +# ## Graphite templates patterns +# ## 1. Template for cpu +# ## 2. Template for disk* +# ## 3. Default template +# # templates = [ +# # "cpu tags.measurement.host.field", +# # "disk* measurement.field", +# # "host.measurement.tags.field" +# #] +# +# ## timeout in seconds for the write connection to graphite +# # timeout = "2s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Send telegraf metrics to graylog +# [[outputs.graylog]] +# ## Endpoints for your graylog instances. +# servers = ["udp://127.0.0.1:12201"] +# +# ## Connection timeout. +# # timeout = "5s" +# +# ## The field to use as the GELF short_message, if unset the static string +# ## "telegraf" will be used. +# ## example: short_message_field = "message" +# # short_message_field = "" +# +# ## According to GELF payload specification, additional fields names must be prefixed +# ## with an underscore. Previous versions did not prefix custom field 'name' with underscore. +# ## Set to true for backward compatibility. +# # name_field_no_prefix = false +# +# ## Connection retry options +# ## Attempt to connect to the enpoints if the initial connection fails. +# ## If 'false', Telegraf will give up after 3 connection attempt and will +# ## exit with an error. If set to 'true', the plugin will retry to connect +# ## to the unconnected endpoints infinitely. +# # connection_retry = false +# ## Time to wait between connection retry attempts. +# # connection_retry_wait_time = "15s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Send telegraf metrics to GroundWork Monitor +# [[outputs.groundwork]] +# ## URL of your groundwork instance. +# url = "https://groundwork.example.com" +# +# ## Agent uuid for GroundWork API Server. +# agent_id = "" +# +# ## Username and password to access GroundWork API. 
+# username = "" +# password = "" +# +# ## Default application type to use in GroundWork client +# # default_app_type = "TELEGRAF" +# +# ## Default display name for the host with services(metrics). +# # default_host = "telegraf" +# +# ## Default service state. +# # default_service_state = "SERVICE_OK" +# +# ## The name of the tag that contains the hostname. +# # resource_tag = "host" +# +# ## The name of the tag that contains the host group name. +# # group_tag = "group" + + +# # Configurable HTTP health check resource based on metrics +# [[outputs.health]] +# ## Address and port to listen on. +# ## ex: service_address = "http://localhost:8080" +# ## service_address = "unix:///var/run/telegraf-health.sock" +# # service_address = "http://:8080" +# +# ## The maximum duration for reading the entire request. +# # read_timeout = "5s" +# ## The maximum duration for writing the entire response. +# # write_timeout = "5s" +# +# ## Username and password to accept for HTTP basic authentication. +# # basic_username = "user1" +# # basic_password = "secret" +# +# ## Allowed CA certificates for client certificates. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## TLS server certificate and private key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## One or more check sub-tables should be defined, it is also recommended to +# ## use metric filtering to limit the metrics that flow into this output. +# ## +# ## When using the default buffer sizes, this example will fail when the +# ## metric buffer is half full. +# ## +# ## namepass = ["internal_write"] +# ## tagpass = { output = ["influxdb"] } +# ## +# ## [[outputs.health.compares]] +# ## field = "buffer_size" +# ## lt = 5000.0 +# ## +# ## [[outputs.health.contains]] +# ## field = "buffer_size" + + +# # A plugin that can transmit metrics over HTTP +# [[outputs.http]] +# ## URL is the address to send metrics to +# url = "http://127.0.0.1:8080/telegraf" +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP method, one of: "POST" or "PUT" or "PATCH" +# # method = "POST" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## OAuth2 Client Credentials Grant +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # audience = "" +# # scopes = ["urn:opc:idm:__myscopes__"] +# +# ## Goole API Auth +# # google_application_credentials = "/etc/telegraf/example_secret.json" +# +# ## HTTP Proxy support +# # use_system_proxy = false +# # http_proxy_url = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# +# ## Data format to output. 
+# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## Use batch serialization format (default) instead of line based format. +# ## Batch format is more efficient and should be used unless line based +# ## format is really needed. +# # use_batch_format = true +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## MaxIdleConns controls the maximum number of idle (keep-alive) +# ## connections across all hosts. Zero means no limit. +# # max_idle_conn = 0 +# +# ## MaxIdleConnsPerHost, if non-zero, controls the maximum idle +# ## (keep-alive) connections to keep per-host. If zero, +# ## DefaultMaxIdleConnsPerHost is used(2). +# # max_idle_conn_per_host = 2 +# +# ## Idle (keep-alive) connection timeout. +# ## Maximum amount of time before idle connection is closed. +# ## Zero means no limit. +# # idle_conn_timeout = 0 +# +# ## Amazon Region +# #region = "us-east-1" +# +# ## Amazon Credentials +# ## Amazon Credentials are not built unless the following aws_service +# ## setting is set to a non-empty string. It may need to match the name of +# ## the service output to as well +# #aws_service = "execute-api" +# +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Optional list of statuscodes (<200 or >300) upon which requests should not be retried +# # non_retryable_statuscodes = [409, 413] +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Additional HTTP headers +# # [outputs.http.headers] +# # ## Should be set manually to "application/json" for json data_format +# # Content-Type = "text/plain; charset=utf-8" + + +# # Configuration for sending metrics to InfluxDB +# [[outputs.influxdb]] +# ## The full HTTP or UDP URL for your InfluxDB instance. +# ## +# ## Multiple URLs can be specified for a single cluster, only ONE of the +# ## urls will be written to each interval. +# # urls = ["unix:///var/run/influxdb.sock"] +# # urls = ["udp://127.0.0.1:8089"] +# # urls = ["http://127.0.0.1:8086"] +# +# ## The target database for metrics; will be created as needed. +# ## For UDP url endpoint database needs to be configured on server side. +# # database = "telegraf" +# +# ## The value of this tag will be used to determine the database. If this +# ## tag is not set the 'database' option is used as the default. +# # database_tag = "" +# +# ## If true, the 'database_tag' will not be included in the written metric. +# # exclude_database_tag = false +# +# ## If true, no CREATE DATABASE queries will be sent. Set to true when using +# ## Telegraf with a user without permissions to create databases or when the +# ## database already exists. 
+# # skip_database_creation = false +# +# ## Name of existing retention policy to write to. Empty string writes to +# ## the default retention policy. Only takes effect when using HTTP. +# # retention_policy = "" +# +# ## The value of this tag will be used to determine the retention policy. If this +# ## tag is not set the 'retention_policy' option is used as the default. +# # retention_policy_tag = "" +# +# ## If true, the 'retention_policy_tag' will not be included in the written metric. +# # exclude_retention_policy_tag = false +# +# ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". +# ## Only takes effect when using HTTP. +# # write_consistency = "any" +# +# ## Timeout for HTTP messages. +# # timeout = "5s" +# +# ## HTTP Basic Auth +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## HTTP User-Agent +# # user_agent = "telegraf" +# +# ## UDP payload size is the maximum packet size to send. +# # udp_payload = "512B" +# +# ## Optional TLS Config for use on HTTP connections. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP Proxy override, if unset values the standard proxy environment +# ## variables are consulted to determine which proxy, if any, should be used. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## Additional HTTP headers +# # http_headers = {"X-Special-Header" = "Special-Value"} +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "gzip" +# +# ## When true, Telegraf will output unsigned integers as unsigned values, +# ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned +# ## integer values. Enabling this option will result in field type errors if +# ## existing data has been written. +# # influx_uint_support = false + + +# # Configuration for sending metrics to an Instrumental project +# [[outputs.instrumental]] +# ## Project API Token (required) +# api_token = "API Token" # required +# ## Prefix the metrics with a given name +# prefix = "" +# ## Stats output template (Graphite formatting) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# template = "host.tags.measurement.field" +# ## Timeout in seconds to connect +# timeout = "2s" +# ## Debug true - Print communication to Instrumental +# debug = false + + +# # Save metrics to an IoTDB Database +# [[outputs.iotdb]] +# ## Configuration of IoTDB server connection +# host = "127.0.0.1" +# # port = "6667" +# +# ## Configuration of authentication +# # user = "root" +# # password = "root" +# +# ## Timeout to open a new session. +# ## A value of zero means no timeout. +# # timeout = "5s" +# +# ## Configuration of type conversion for 64-bit unsigned int +# ## IoTDB currently DOES NOT support unsigned integers (version 13.x). +# ## 32-bit unsigned integers are safely converted into 64-bit signed integers by the plugin, +# ## however, this is not true for 64-bit values in general as overflows may occur. +# ## The following setting allows to specify the handling of 64-bit unsigned integers. 
+# ## Available values are: +# ## - "int64" -- convert to 64-bit signed integers and accept overflows +# ## - "int64_clip" -- convert to 64-bit signed integers and clip the values on overflow to 9,223,372,036,854,775,807 +# ## - "text" -- convert to the string representation of the value +# # uint64_conversion = "int64_clip" +# +# ## Configuration of TimeStamp +# ## TimeStamp is always saved in 64bits int. timestamp_precision specifies the unit of timestamp. +# ## Available value: +# ## "second", "millisecond", "microsecond", "nanosecond"(default) +# # timestamp_precision = "nanosecond" +# +# ## Handling of tags +# ## Tags are not fully supported by IoTDB. +# ## A guide with suggestions on how to handle tags can be found here: +# ## https://iotdb.apache.org/UserGuide/Master/API/InfluxDB-Protocol.html +# ## +# ## Available values are: +# ## - "fields" -- convert tags to fields in the measurement +# ## - "device_id" -- attach tags to the device ID +# ## +# ## For Example, a metric named "root.sg.device" with the tags `tag1: "private"` and `tag2: "working"` and +# ## fields `s1: 100` and `s2: "hello"` will result in the following representations in IoTDB +# ## - "fields" -- root.sg.device, s1=100, s2="hello", tag1="private", tag2="working" +# ## - "device_id" -- root.sg.device.private.working, s1=100, s2="hello" +# # convert_tags_to = "device_id" + + +# # Configuration for the Kafka server to send metrics to +# [[outputs.kafka]] +# ## URLs of kafka brokers +# ## The brokers listed here are used to connect to collect metadata about a +# ## cluster. However, once the initial metadata collect is completed, telegraf +# ## will communicate solely with the kafka leader and not all defined brokers. +# brokers = ["localhost:9092"] +# +# ## Kafka topic for producer messages +# topic = "telegraf" +# +# ## The value of this tag will be used as the topic. If not set the 'topic' +# ## option is used. +# # topic_tag = "" +# +# ## If true, the 'topic_tag' will be removed from to the metric. +# # exclude_topic_tag = false +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Of particular interested, lz4 compression +# ## requires at least version 0.10.0.0. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## The routing tag specifies a tagkey on the metric whose value is used as +# ## the message key. The message key is used to determine which partition to +# ## send the message to. This tag is prefered over the routing_key option. +# routing_tag = "host" +# +# ## The routing key is set as the message key and used to determine which +# ## partition to send the message to. This value is only used when no +# ## routing_tag is set or as a fallback when the tag specified in routing tag +# ## is not found. +# ## +# ## If set to "random", a random value will be generated for each message. +# ## +# ## When unset, no message key is added and each message is routed to a random +# ## partition. +# ## +# ## ex: routing_key = "random" +# ## routing_key = "telegraf" +# # routing_key = "" +# +# ## Compression codec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD +# # compression_codec = 0 +# +# ## Idempotent Writes +# ## If enabled, exactly one copy of each message is written. 
+# # idempotent_writes = false +# +# ## RequiredAcks is used in Produce Requests to tell the broker how many +# ## replica acknowledgements it must see before responding +# ## 0 : the producer never waits for an acknowledgement from the broker. +# ## This option provides the lowest latency but the weakest durability +# ## guarantees (some data will be lost when a server fails). +# ## 1 : the producer gets an acknowledgement after the leader replica has +# ## received the data. This option provides better durability as the +# ## client waits until the server acknowledges the request as successful +# ## (only messages that were written to the now-dead leader but not yet +# ## replicated will be lost). +# ## -1: the producer gets an acknowledgement after all in-sync replicas have +# ## received the data. This option provides the best durability, we +# ## guarantee that no messages will be lost as long as at least one in +# ## sync replica remains. +# # required_acks = -1 +# +# ## The maximum number of times to retry sending a metric before failing +# ## until the next flush. +# # max_retry = 3 +# +# ## The maximum permitted size of a message. Should be set equal to or +# ## smaller than the broker's 'message.max.bytes'. +# # max_message_bytes = 1000000 +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Defaults to the OS configuration if not specified or zero. +# # keep_alive_period = "15s" +# +# ## Optional SOCKS5 proxy to use when connecting to brokers +# # socks5_enabled = true +# # socks5_address = "127.0.0.1:1080" +# # socks5_username = "alice" +# # socks5_password = "pass123" +# +# ## Optional SASL Config +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## Access token used if sasl_mechanism is OAUTHBEARER +# # sasl_access_token = "" +# +# ## Arbitrary key value string pairs to pass as a TOML table. For example: +# # {logicalCluster = "cluster-042", poolId = "pool-027"} +# # sasl_extensions = {} +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# # Disable Kafka metadata full fetch +# # metadata_full = false +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Optional topic suffix configuration. +# ## If the section is omitted, no suffix is used. 
+# ## Following topic suffix methods are supported: +# ## measurement - suffix equals to separator + measurement's name +# ## tags - suffix equals to separator + specified tags' values +# ## interleaved with separator +# +# ## Suffix equals to "_" + measurement name +# # [outputs.kafka.topic_suffix] +# # method = "measurement" +# # separator = "_" +# +# ## Suffix equals to "__" + measurement's "foo" tag value. +# ## If there's no such a tag, suffix equals to an empty string +# # [outputs.kafka.topic_suffix] +# # method = "tags" +# # keys = ["foo"] +# # separator = "__" +# +# ## Suffix equals to "_" + measurement's "foo" and "bar" +# ## tag values, separated by "_". If there is no such tags, +# ## their values treated as empty strings. +# # [outputs.kafka.topic_suffix] +# # method = "tags" +# # keys = ["foo", "bar"] +# # separator = "_" + + +# # Configuration for the AWS Kinesis output. +# [[outputs.kinesis]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" +# +# ## debug will show upstream aws messages. +# debug = false +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## The partition key can be calculated using one of several methods: +# ## +# ## Use a static value for all writes: +# # [outputs.kinesis.partition] +# # method = "static" +# # key = "howdy" +# # +# ## Use a random partition key on each write: +# # [outputs.kinesis.partition] +# # method = "random" +# # +# ## Use the measurement name as the partition key: +# # [outputs.kinesis.partition] +# # method = "measurement" +# # +# ## Use the value of a tag for all writes, if the tag is not set the empty +# ## default option will be used. When no default, defaults to "telegraf" +# # [outputs.kinesis.partition] +# # method = "tag" +# # key = "host" +# # default = "mykey" + + +# # Configuration for Librato API to send metrics to. +# [[outputs.librato]] +# ## Librato API Docs +# ## http://dev.librato.com/v1/metrics-authentication +# ## Librato API user +# api_user = "telegraf@influxdb.com" # required. +# ## Librato API token +# api_token = "my-secret-token" # required. +# ## Debug +# # debug = false +# ## Connection timeout. 
+# # timeout = "5s" +# ## Output source Template (same as graphite buckets) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# ## This template is used in librato's source (not metric's name) +# template = "host" + + +# # A plugin that can send metrics over HTTPs to Logz.io +# [[outputs.logzio]] +# ## Connection timeout, defaults to "5s" if not set. +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Logz.io account token +# token = "your logz.io token" # required +# +# ## Use your listener URL for your Logz.io account region. +# # url = "https://listener.logz.io:8071" + + +# # A plugin that can transmit logs to Loki +# [[outputs.loki]] +# ## The domain of Loki +# domain = "https://loki.domain.tld" +# +# ## Endpoint to write api +# # endpoint = "/loki/api/v1/push" +# +# ## Connection timeout, defaults to "5s" if not set. +# # timeout = "5s" +# +# ## Basic auth credential +# # username = "loki" +# # password = "pass" +# +# ## Additional HTTP headers +# # http_headers = {"X-Scope-OrgID" = "1"} +# +# ## If the request must be gzip encoded +# # gzip_request = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Metric Name Label +# ## Label to use for the metric name to when sending metrics. If set to an +# ## empty string, this will not add the label. This is NOT suggested as there +# ## is no way to differentiate between multiple metrics. +# # metric_name_label = "__name" + + +# # A plugin that can transmit logs to mongodb +# [[outputs.mongodb]] +# # connection string examples for mongodb +# dsn = "mongodb://localhost:27017" +# # dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1" +# +# # overrides serverSelectionTimeoutMS in dsn if set +# # timeout = "30s" +# +# # default authentication, optional +# # authentication = "NONE" +# +# # for SCRAM-SHA-256 authentication +# # authentication = "SCRAM" +# # username = "root" +# # password = "***" +# +# # for x509 certificate authentication +# # authentication = "X509" +# # tls_ca = "ca.pem" +# # tls_key = "client.pem" +# # # tls_key_pwd = "changeme" # required for encrypted tls_key +# # insecure_skip_verify = false +# +# # database to store measurements and time series collections +# # database = "telegraf" +# +# # granularity can be seconds, minutes, or hours. +# # configuring this value will be based on your input collection frequency. +# # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection +# # granularity = "seconds" +# +# # optionally set a TTL to automatically expire documents from the measurement collections. +# # ttl = "360h" + + +# # Configuration for MQTT server to send metrics to +# [[outputs.mqtt]] +# ## MQTT Brokers +# ## The list of brokers should only include the hostname or IP address and the +# ## port to the broker. This should follow the format `[{scheme}://]{host}:{port}`. For +# ## example, `localhost:1883` or `mqtt://localhost:1883`. +# ## Scheme can be any of the following: tcp://, mqtt://, tls://, mqtts:// +# ## non-TLS and TLS servers can not be mix-and-matched. +# servers = ["localhost:1883", ] # or ["mqtts://tls.example.com:1883"] +# +# ## Protocol can be `3.1.1` or `5`. 
Default is `3.1.1`
+# # protocol = "3.1.1"
+#
+# ## MQTT Topic for Producer Messages
+# ## MQTT outputs send metrics to this topic format:
+# ## {{ .TopicPrefix }}/{{ .Hostname }}/{{ .PluginName }}/{{ .Tag "tag_key" }}
+# ## (e.g. prefix/web01.example.com/mem/some_tag_value)
+# ## Each path segment accepts either a template placeholder, an environment variable, or a tag key
+# ## of the form `{{.Tag "tag_key_name"}}`. Empty path elements as well as special MQTT characters
+# ## (such as `+` or `#`) are invalid in the topic name and will lead to an error.
+# ## In case a tag is missing in the metric, that path segment is omitted from the final topic.
+# topic = "telegraf/{{ .Hostname }}/{{ .PluginName }}"
+#
+# ## QoS policy for messages
+# ## The mqtt QoS policy for sending messages.
+# ## See https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm
+# ## 0 = at most once
+# ## 1 = at least once
+# ## 2 = exactly once
+# # qos = 2
+#
+# ## Keep Alive
+# ## Defines the maximum length of time that the broker and client may not
+# ## communicate. Defaults to 0 which turns the feature off.
+# ##
+# ## For mosquitto version v2.0.12 and later there is a bug
+# ## (see https://github.com/eclipse/mosquitto/issues/2117), which requires
+# ## this to be non-zero. As a reference eclipse/paho.mqtt.golang defaults to 30.
+# # keep_alive = 0
+#
+# ## Username and password used to connect to the MQTT server.
+# # username = "telegraf"
+# # password = "metricsmetricsmetricsmetrics"
+#
+# ## client ID
+# ## The unique client ID used to connect to the MQTT server. If this parameter
+# ## is not set, a random ID is generated.
+# # client_id = ""
+#
+# ## Timeout for write operations. Default: 5s
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## When true, metrics will be sent in one MQTT message per flush. Otherwise,
+# ## metrics are written one metric per MQTT message.
+# ## DEPRECATED: Use layout option instead
+# # batch = false
+#
+# ## When true, metrics will have the RETAIN flag set, so the broker caches each
+# ## entry until someone actually reads it
+# # retain = false
+#
+# ## Layout of the topics published.
+# ## The following choices are available:
+# ## non-batch -- send individual messages, one for each metric
+# ## batch -- send all metrics as a single message per MQTT topic
+# ## NOTE: The following options will ignore the 'data_format' option and send single values
+# ## field -- send individual messages for each field, appending its name to the metric topic
+# ## homie-v4 -- send metrics with fields and tags according to the 4.0.0 specs
+# ## see https://homieiot.github.io/specification/
+# # layout = "non-batch"
+#
+# ## HOMIE specific settings
+# ## The following options provide templates for setting the device name
+# ## and the node-ID for the topics. Both options are MANDATORY and can contain
+# ## {{ .PluginName }} (metric name), {{ .Tag "key"}} (tag reference to 'key')
+# ## or constant strings. The templates MAY NOT contain slashes!
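+# ## A purely illustrative pair of values (the "host" tag is an assumption;
+# ## any constant string or tag reference without slashes works as well):
+# ## ex: homie_device_name = '{{ .Tag "host" }}'
+# ##     homie_node_id = "{{ .PluginName }}"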
+# # homie_device_name = "" +# # homie_node_id = "" +# +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Optional MQTT 5 publish properties +# ## These setting only apply if the "protocol" property is set to 5. This must +# ## be defined at the end of the plugin settings, otherwise TOML will assume +# ## anything else is part of this table. For more details on publish properties +# ## see the spec: +# ## https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901109 +# # [outputs.mqtt.v5] +# # content_type = "" +# # response_topic = "" +# # message_expiry = "0s" +# # topic_alias = 0 +# # [outputs.mqtt.v5.user_properties] +# # "key1" = "value 1" +# # "key2" = "value 2" + + +# # Send telegraf measurements to NATS +# [[outputs.nats]] +# ## URLs of NATS servers +# servers = ["nats://localhost:4222"] +# +# ## Optional client name +# # name = "" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# +# ## NATS subject for producer messages +# subject = "telegraf" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send aggregated metrics to Nebius.Cloud Monitoring +# [[outputs.nebius_cloud_monitoring]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Nebius.Cloud monitoring API endpoint. Normally should not be changed +# # endpoint = "https://monitoring.api.il.nebius.cloud/monitoring/v2/data/write" + + +# # Send metrics to New Relic metrics endpoint +# [[outputs.newrelic]] +# ## The 'insights_key' parameter requires a NR license key. +# ## New Relic recommends you create one +# ## with a convenient name such as TELEGRAF_INSERT_KEY. +# ## reference: https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#ingest-license-key +# # insights_key = "New Relic License Key Here" +# +# ## Prefix to add to add to metric name for easy identification. +# ## This is very useful if your metric names are ambiguous. +# # metric_prefix = "" +# +# ## Timeout for writes to the New Relic API. +# # timeout = "15s" +# +# ## HTTP Proxy override. If unset use values from the standard +# ## proxy environment variables to determine proxy, if any. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## Metric URL override to enable geographic location endpoints. +# # If not set use values from the standard +# # metric_url = "https://metric-api.newrelic.com/metric/v1" + + +# # Send telegraf measurements to NSQD +# [[outputs.nsq]] +# ## Location of nsqd instance listening on TCP +# server = "localhost:4150" +# ## NSQ topic for producer messages +# topic = "telegraf" +# +# ## Data format to output. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send OpenTelemetry metrics over gRPC +# [[outputs.opentelemetry]] +# ## Override the default (localhost:4317) OpenTelemetry gRPC service +# ## address:port +# # service_address = "localhost:4317" +# +# ## Override the default (5s) request timeout +# # timeout = "5s" +# +# ## Optional TLS Config. +# ## +# ## Root certificates for verifying server certificates encoded in PEM format. +# # tls_ca = "/etc/telegraf/ca.pem" +# ## The public and private keypairs for the client encoded in PEM format. +# ## May contain intermediate certificates. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS, but skip TLS chain and host verification. +# # insecure_skip_verify = false +# ## Send the specified TLS server name via SNI. +# # tls_server_name = "foo.example.com" +# +# ## Override the default (gzip) compression used to send data. +# ## Supports: "gzip", "none" +# # compression = "gzip" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Configuration options for the Coralogix dialect +# ## Enable the following section of you use this plugin with a Coralogix endpoint +# # [outputs.opentelemetry.coralogix] +# # ## Your Coralogix private key (required). +# # ## Please note that this is sensitive data! +# # private_key = "your_coralogix_key" +# # +# # ## Application and subsystem names for the metrics (required) +# # application = "$NAMESPACE" +# # subsystem = "$HOSTNAME" +# +# ## Additional OpenTelemetry resource attributes +# # [outputs.opentelemetry.attributes] +# # "service.name" = "demo" +# +# ## Additional gRPC request metadata +# # [outputs.opentelemetry.headers] +# # key1 = "value1" + + +# # Configuration for OpenTSDB server to send metrics to +# [[outputs.opentsdb]] +# ## prefix for metrics keys +# prefix = "my.specific.prefix." +# +# ## DNS name of the OpenTSDB server +# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the +# ## telnet API. "http://opentsdb.example.com" will use the Http API. +# host = "opentsdb.example.com" +# +# ## Port of the OpenTSDB server +# port = 4242 +# +# ## Number of data points to send to OpenTSDB in Http requests. +# ## Not used with telnet API. +# http_batch_size = 50 +# +# ## URI Path for Http requests to OpenTSDB. +# ## Used in cases where OpenTSDB is located behind a reverse proxy. +# http_path = "/api/put" +# +# ## Debug true - Prints OpenTSDB communication +# debug = false +# +# ## Separator separates measurement name from field +# separator = "_" + + +# # Publishes metrics to a postgresql database +# [[outputs.postgresql]] +# ## Specify connection address via the standard libpq connection string: +# ## host=... user=... password=... sslmode=... dbname=... +# ## Or a URL: +# ## postgres://[user[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] +# ## See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING +# ## +# ## All connection parameters are optional. Environment vars are also supported. +# ## e.g. 
PGPASSWORD, PGHOST, PGUSER, PGDATABASE +# ## All supported vars can be found here: +# ## https://www.postgresql.org/docs/current/libpq-envars.html +# ## +# ## Non-standard parameters: +# ## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts. +# ## pool_min_conns (default: 0) - Minimum size of connection pool. +# ## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing. +# ## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing. +# ## pool_health_check_period (default: 0s) - Duration between health checks on idle connections. +# # connection = "" +# +# ## Postgres schema to use. +# # schema = "public" +# +# ## Store tags as foreign keys in the metrics table. Default is false. +# # tags_as_foreign_keys = false +# +# ## Suffix to append to table name (measurement name) for the foreign tag table. +# # tag_table_suffix = "_tag" +# +# ## Deny inserting metrics if the foreign tag can't be inserted. +# # foreign_tag_constraint = false +# +# ## Store all tags as a JSONB object in a single 'tags' column. +# # tags_as_jsonb = false +# +# ## Store all fields as a JSONB object in a single 'fields' column. +# # fields_as_jsonb = false +# +# ## Name of the timestamp column +# ## NOTE: Some tools (e.g. Grafana) require the default name so be careful! +# # timestamp_column_name = "time" +# +# ## Type of the timestamp column +# ## Currently, "timestamp without time zone" and "timestamp with time zone" +# ## are supported +# # timestamp_column_type = "timestamp without time zone" +# +# ## Templated statements to execute when creating a new table. +# # create_templates = [ +# # '''CREATE TABLE {{ .table }} ({{ .columns }})''', +# # ] +# +# ## Templated statements to execute when adding columns to a table. +# ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. Points +# ## containing fields for which there is no column will have the field omitted. +# # add_column_templates = [ +# # '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''', +# # ] +# +# ## Templated statements to execute when creating a new tag table. +# # tag_table_create_templates = [ +# # '''CREATE TABLE {{ .table }} ({{ .columns }}, PRIMARY KEY (tag_id))''', +# # ] +# +# ## Templated statements to execute when adding columns to a tag table. +# ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. +# # tag_table_add_column_templates = [ +# # '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''', +# # ] +# +# ## The postgres data type to use for storing unsigned 64-bit integer values (Postgres does not have a native +# ## unsigned 64-bit integer type). +# ## The value can be one of: +# ## numeric - Uses the PostgreSQL "numeric" data type. +# ## uint8 - Requires pguint extension (https://github.com/petere/pguint) +# # uint64_type = "numeric" +# +# ## When using pool_max_conns>1, and a temporary error occurs, the query is retried with an incremental backoff. This +# ## controls the maximum backoff duration. +# # retry_max_backoff = "15s" +# +# ## Approximate number of tag IDs to store in in-memory cache (when using tags_as_foreign_keys). +# ## This is an optimization to skip inserting known tag IDs. +# ## Each entry consumes approximately 34 bytes of memory. 
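+# ## As a rough worked example (not an extra option): with the default of
+# ## 100000 entries shown below, that is about 100000 * 34 B, i.e. roughly 3.4 MB.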
+# # tag_cache_size = 100000 +# +# ## Enable & set the log level for the Postgres driver. +# # log_level = "warn" # trace, debug, info, warn, error, none + + +# A plugin that can transmit metrics over HTTP +[[outputs.http]] + # URL is the address to send metrics to + url = "http://localhost:9090/api/v1/write" + + # Timeout for HTTP message + timeout = "5s" + + # HTTP method, one of: "POST" or "PUT" or "PATCH" + method = "POST" + + # # HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + # Data format to output. + # Each data format has it's own unique set of configuration options, read + # more about them here: + # https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "prometheusremotewrite" + + # Additional HTTP headers + [outputs.http.headers] + ## Should be set manually to "application/json" for json data_format + # Content-Type = "text/plain; charset=utf-8" + Content-Type = "application/x-protobuf" + Content-Encoding = "snappy" + X-Prometheus-Remote-Write-Version = "0.1.0" + + +# # Configuration for the Prometheus client to spawn +# [[outputs.prometheus_client]] +# ## Address to listen on. +# listen = ":9273" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics. +# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details. +# ## Valid options: 1, 2 +# # metric_version = 1 +# +# ## Use HTTP Basic Authentication. +# # basic_username = "Foo" +# # basic_password = "Bar" +# +# ## If set, the IP Ranges which are allowed to access metrics. +# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# # ip_range = [] +# +# ## Path to publish the metrics on. +# # path = "/metrics" +# +# ## Expiration interval for each metric. 0 == no expiration +# # expiration_interval = "60s" +# +# ## Collectors to enable, valid entries are "gocollector" and "process". +# ## If unset, both are enabled. +# # collectors_exclude = ["gocollector", "process"] +# +# ## Send string metrics as Prometheus labels. +# ## Unless set to false all string metrics will be sent as labels. +# # string_as_label = true +# +# ## If set, enable TLS with the given certificate. +# # tls_cert = "/etc/ssl/telegraf.crt" +# # tls_key = "/etc/ssl/telegraf.key" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Export metric collection time. +# # export_timestamp = false +# +# ## Specify the metric type explicitly. +# ## This overrides the metric-type of the Telegraf metric. Globbing is allowed. +# # [outputs.prometheus_client.metric_types] +# # counter = [] +# # gauge = [] + + +# # Publishes metrics to a redis timeseries server +# [[outputs.redistimeseries]] +# ## The address of the RedisTimeSeries server. 
+# address = "127.0.0.1:6379" +# +# ## Redis ACL credentials +# # username = "" +# # password = "" +# # database = 0 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# # insecure_skip_verify = false + + +# # Configuration for Riemann to send metrics to +# [[outputs.riemann]] +# ## The full TCP or UDP URL of the Riemann server +# url = "tcp://localhost:5555" +# +# ## Riemann event TTL, floating-point time in seconds. +# ## Defines how long that an event is considered valid for in Riemann +# # ttl = 30.0 +# +# ## Separator to use between measurement and field name in Riemann service name +# ## This does not have any effect if 'measurement_as_attribute' is set to 'true' +# separator = "/" +# +# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name +# # measurement_as_attribute = false +# +# ## Send string metrics as Riemann event states. +# ## Unless enabled all string metrics will be ignored +# # string_as_state = false +# +# ## A list of tag keys whose values get sent as Riemann tags. +# ## If empty, all Telegraf tag values will be sent as tags +# # tag_keys = ["telegraf","custom_tag"] +# +# ## Additional Riemann tags to send. +# # tags = ["telegraf-output"] +# +# ## Description for Riemann event +# # description_text = "metrics collected from telegraf" +# +# ## Riemann client write timeout, defaults to "5s" if not set. +# # timeout = "5s" + + +# ## DEPRECATED: The "riemann_legacy" plugin is deprecated in version 1.3.0 and will be removed in 1.30.0, use 'outputs.riemann' instead (see https://github.com/influxdata/telegraf/issues/1878). +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann_legacy]] +# ## URL of server +# url = "localhost:5555" +# ## transport protocol to use either tcp or udp +# transport = "tcp" +# ## separator to use between input name and field name in Riemann service name +# separator = " " + + +# # Send aggregate metrics to Sensu Monitor +# [[outputs.sensu]] +# ## BACKEND API URL is the Sensu Backend API root URL to send metrics to +# ## (protocol, host, and port only). The output plugin will automatically +# ## append the corresponding backend API path +# ## /api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). +# ## +# ## Backend Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## AGENT API URL is the Sensu Agent API root URL to send metrics to +# ## (protocol, host, and port only). The output plugin will automatically +# ## append the correspeonding agent API path (/events). +# ## +# ## Agent API Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output +# ## plugin will use backend_api_url. 
If backend_api_url and agent_api_url are +# ## not provided, the output plugin will default to use an agent_api_url of +# ## http://127.0.0.1:3031 +# ## +# # backend_api_url = "http://127.0.0.1:8080" +# # agent_api_url = "http://127.0.0.1:3031" +# +# ## API KEY is the Sensu Backend API token +# ## Generate a new API token via: +# ## +# ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities +# ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf +# ## $ sensuctl user create telegraf --group telegraf --password REDACTED +# ## $ sensuctl api-key grant telegraf +# ## +# ## For more information on Sensu RBAC profiles & API tokens, please visit: +# ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ +# ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ +# ## +# # api_key = "${SENSU_API_KEY}" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Sensu Event details +# ## +# ## Below are the event details to be sent to Sensu. The main portions of the +# ## event are the check, entity, and metrics specifications. For more information +# ## on Sensu events and its components, please visit: +# ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events +# ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks +# ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities +# ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics +# ## +# ## Check specification +# ## The check name is the name to give the Sensu check associated with the event +# ## created. This maps to check.metatadata.name in the event. +# [outputs.sensu.check] +# name = "telegraf" +# +# ## Entity specification +# ## Configure the entity name and namespace, if necessary. This will be part of +# ## the entity.metadata in the event. +# ## +# ## NOTE: if the output plugin is configured to send events to a +# ## backend_api_url and entity_name is not set, the value returned by +# ## os.Hostname() will be used; if the output plugin is configured to send +# ## events to an agent_api_url, entity_name and entity_namespace are not used. +# # [outputs.sensu.entity] +# # name = "server-01" +# # namespace = "default" +# +# ## Metrics specification +# ## Configure the tags for the metrics that are sent as part of the Sensu event +# # [outputs.sensu.tags] +# # source = "telegraf" +# +# ## Configure the handler(s) for processing the provided metrics +# # [outputs.sensu.metrics] +# # handlers = ["influxdb","elasticsearch"] + + +# # Send metrics and events to SignalFx +# [[outputs.signalfx]] +# ## SignalFx Org Access Token +# access_token = "my-secret-token" +# +# ## The SignalFx realm that your organization resides in +# signalfx_realm = "us9" # Required if ingest_url is not set +# +# ## You can optionally provide a custom ingest url instead of the +# ## signalfx_realm option above if you are using a gateway or proxy +# ## instance. 
This option takes precident over signalfx_realm. +# ingest_url = "https://my-custom-ingest/" +# +# ## Event typed metrics are omitted by default, +# ## If you require an event typed metric you must specify the +# ## metric name in the following list. +# included_event_names = ["plugin.metric_name"] + + +# # Generic socket writer capable of handling multiple socket types. +# [[outputs.socket_writer]] +# ## URL to connect to +# # address = "tcp://127.0.0.1:8094" +# # address = "tcp://example.com:http" +# # address = "tcp4://127.0.0.1:8094" +# # address = "tcp6://127.0.0.1:8094" +# # address = "tcp6://[2001:db8::1]:8094" +# # address = "udp://127.0.0.1:8094" +# # address = "udp4://127.0.0.1:8094" +# # address = "udp6://127.0.0.1:8094" +# # address = "unix:///tmp/telegraf.sock" +# # address = "unixgram:///tmp/telegraf.sock" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## Content encoding for message payloads, can be set to "gzip" or to +# ## "identity" to apply no encoding. +# ## +# # content_encoding = "identity" +# +# ## Data format to generate. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Save metrics to an SQL Database +# [[outputs.sql]] +# ## Database driver +# ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), +# ## sqlite (SQLite3), snowflake (snowflake.com) clickhouse (ClickHouse) +# # driver = "" +# +# ## Data source name +# ## The format of the data source name is different for each database driver. +# ## See the plugin readme for details. +# # data_source_name = "" +# +# ## Timestamp column name +# # timestamp_column = "timestamp" +# +# ## Table creation template +# ## Available template variables: +# ## {TABLE} - table name as a quoted identifier +# ## {TABLELITERAL} - table name as a quoted string literal +# ## {COLUMNS} - column definitions (list of quoted identifiers and types) +# # table_template = "CREATE TABLE {TABLE}({COLUMNS})" +# +# ## Table existence check template +# ## Available template variables: +# ## {TABLE} - tablename as a quoted identifier +# # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" +# +# ## Initialization SQL +# # init_sql = "" +# +# ## Maximum amount of time a connection may be idle. "0s" means connections are +# ## never closed due to idle time. +# # connection_max_idle_time = "0s" +# +# ## Maximum amount of time a connection may be reused. "0s" means connections +# ## are never closed due to age. +# # connection_max_lifetime = "0s" +# +# ## Maximum number of connections in the idle connection pool. 0 means unlimited. +# # connection_max_idle = 2 +# +# ## Maximum number of open connections to the database. 0 means unlimited. 
+# # connection_max_open = 0 +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Metric type to SQL type conversion +# ## The values on the left are the data types Telegraf has and the values on +# ## the right are the data types Telegraf will use when sending to a database. +# ## +# ## The database values used must be data types the destination database +# ## understands. It is up to the user to ensure that the selected data type is +# ## available in the database they are using. Refer to your database +# ## documentation for what data types are available and supported. +# #[outputs.sql.convert] +# # integer = "INT" +# # real = "DOUBLE" +# # text = "TEXT" +# # timestamp = "TIMESTAMP" +# # defaultvalue = "TEXT" +# # unsigned = "UNSIGNED" +# # bool = "BOOL" +# # ## This setting controls the behavior of the unsigned value. By default the +# # ## setting will take the integer value and append the unsigned value to it. The other +# # ## option is "literal", which will use the actual value the user provides to +# # ## the unsigned option. This is useful for a database like ClickHouse where +# # ## the unsigned value should use a value like "uint64". +# # # conversion_style = "unsigned_suffix" + + +# # Configuration for Google Cloud Stackdriver to send metrics to +# [[outputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## The namespace for the metric descriptor +# ## This is optional and users are encouraged to set the namespace as a +# ## resource label instead. If omitted it is not included in the metric name. +# namespace = "telegraf" +# +# ## Metric Type Prefix +# ## The DNS name used with the metric type as a prefix. +# # metric_type_prefix = "custom.googleapis.com" +# +# ## Metric Name Format +# ## Specifies the layout of the metric name, choose from: +# ## * path: 'metric_type_prefix_namespace_name_key' +# ## * official: 'metric_type_prefix/namespace_name_key/kind' +# # metric_name_format = "path" +# +# ## Metric Data Type +# ## By default, telegraf will use whatever type the metric comes in as. +# ## However, for some use cases, forcing int64, may be preferred for values: +# ## * source: use whatever was passed in +# ## * double: preferred datatype to allow queries by PromQL. +# # metric_data_type = "source" +# +# ## Tags as resource labels +# ## Tags defined in this option, when they exist, are added as a resource +# ## label and not included as a metric label. The values from tags override +# ## the values defined under the resource_labels config options. +# # tags_as_resource_label = [] +# +# ## Custom resource type +# # resource_type = "generic_node" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Additional resource labels +# # [outputs.stackdriver.resource_labels] +# # node_id = "$HOSTNAME" +# # namespace = "myapp" +# # location = "eu-north0" + + +# # Configuration for active mq with stomp protocol to send metrics to +# [[outputs.stomp]] +# host = "localhost:61613" +# +# ## Queue name for producer messages +# queueName = "telegraf" +# +# ## Username and password if required by the Active MQ server. 
+# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Data format to output. +# data_format = "json" + + +# # A plugin that can send metrics to Sumo Logic HTTP metric collector. +# [[outputs.sumologic]] +# ## Unique URL generated for your HTTP Metrics Source. +# ## This is the address to send metrics to. +# # url = "https://events.sumologic.net/receiver/v1/http/" +# +# ## Data format to be used for sending metrics. +# ## This will set the "Content-Type" header accordingly. +# ## Currently supported formats: +# ## * graphite - for Content-Type of application/vnd.sumologic.graphite +# ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2 +# ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus +# ## +# ## More information can be found at: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics +# ## +# ## NOTE: +# ## When unset, telegraf will by default use the influx serializer which is currently unsupported +# ## in HTTP Source. +# data_format = "carbon2" +# +# ## Timeout used for HTTP request +# # timeout = "5s" +# +# ## Max HTTP request body size in bytes before compression (if applied). +# ## By default 1MB is recommended. +# ## NOTE: +# ## Bear in mind that in some serializer a metric even though serialized to multiple +# ## lines cannot be split any further so setting this very low might not work +# ## as expected. +# # max_request_body_size = 1000000 +# +# ## Additional, Sumo specific options. +# ## Full list can be found here: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers +# +# ## Desired source name. +# ## Useful if you want to override the source name configured for the source. +# # source_name = "" +# +# ## Desired host name. +# ## Useful if you want to override the source host configured for the source. +# # source_host = "" +# +# ## Desired source category. +# ## Useful if you want to override the source category configured for the source. +# # source_category = "" +# +# ## Comma-separated key=value list of dimensions to apply to every metric. +# ## Custom dimensions will allow you to query your metrics at a more granular level. +# # dimensions = "" + + +# # Configuration for Syslog server to send metrics to +# [[outputs.syslog]] +# ## URL to connect to +# ## ex: address = "tcp://127.0.0.1:8094" +# ## ex: address = "tcp4://127.0.0.1:8094" +# ## ex: address = "tcp6://127.0.0.1:8094" +# ## ex: address = "tcp6://[2001:db8::1]:8094" +# ## ex: address = "udp://127.0.0.1:8094" +# ## ex: address = "udp4://127.0.0.1:8094" +# ## ex: address = "udp6://127.0.0.1:8094" +# address = "tcp://127.0.0.1:8094" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## The framing technique with which it is expected that messages are +# ## transported (default = "octet-counting"). 
Whether the messages come +# ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must +# ## be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## SD-PARAMs settings +# ## Syslog messages can contain key/value pairs within zero or more +# ## structured data sections. For each unrecognized metric tag/field a +# ## SD-PARAMS is created. +# ## +# ## Example: +# ## [[outputs.syslog]] +# ## sdparam_separator = "_" +# ## default_sdid = "default@32473" +# ## sdids = ["foo@123", "bar@456"] +# ## +# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 +# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] +# +# ## SD-PARAMs separator between the sdid and tag/field key (default = "_") +# # sdparam_separator = "_" +# +# ## Default sdid used for tags/fields that don't contain a prefix defined in +# ## the explicit sdids setting below If no default is specified, no SD-PARAMs +# ## will be used for unrecognized field. +# # default_sdid = "default@32473" +# +# ## List of explicit prefixes to extract from tag/field keys and use as the +# ## SDID, if they match (see above example for more details): +# # sdids = ["foo@123", "bar@456"] +# +# ## Default severity value. Severity and Facility are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field +# ## with key "severity_code" is defined. If unset, 5 (notice) is the default +# # default_severity_code = 5 +# +# ## Default facility value. Facility and Severity are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with +# ## key "facility_code" is defined. If unset, 1 (user-level) is the default +# # default_facility_code = 1 +# +# ## Default APP-NAME value (RFC5424#section-6.2.5) +# ## Used when no metric tag with key "appname" is defined. +# ## If unset, "Telegraf" is the default +# # default_appname = "Telegraf" + + +# # Configuration for sending metrics to Amazon Timestream. +# [[outputs.timestream]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order: +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Timestream database where the metrics will be inserted. +# ## The database must exist prior to starting Telegraf. 
+# database_name = "yourDatabaseNameHere" +# +# ## Specifies if the plugin should describe the Timestream database upon starting +# ## to validate if it has access necessary permissions, connection, etc., as a safety check. +# ## If the describe operation fails, the plugin will not start +# ## and therefore the Telegraf agent will not start. +# describe_database_on_start = false +# +# ## Specifies how the data is organized in Timestream. +# ## Valid values are: single-table, multi-table. +# ## When mapping_mode is set to single-table, all of the data is stored in a single table. +# ## When mapping_mode is set to multi-table, the data is organized and stored in multiple tables. +# ## The default is multi-table. +# mapping_mode = "multi-table" +# +# ## Specifies if the plugin should create the table, if the table does not exist. +# create_table_if_not_exists = true +# +# ## Specifies the Timestream table magnetic store retention period in days. +# ## Check Timestream documentation for more details. +# ## NOTE: This property is valid when create_table_if_not_exists = true. +# create_table_magnetic_store_retention_period_in_days = 365 +# +# ## Specifies the Timestream table memory store retention period in hours. +# ## Check Timestream documentation for more details. +# ## NOTE: This property is valid when create_table_if_not_exists = true. +# create_table_memory_store_retention_period_in_hours = 24 +# +# ## Specifies how the data is written into Timestream. +# ## Valid values are: true, false +# ## When use_multi_measure_records is set to true, all of the tags and fields are stored +# ## as a single row in a Timestream table. +# ## When use_multi_measure_record is set to false, Timestream stores each field in a +# ## separate table row, thereby storing the tags multiple times (once for each field). +# ## The recommended setting is true. +# ## The default is false. +# use_multi_measure_records = "false" +# +# ## Specifies the measure_name to use when sending multi-measure records. +# ## NOTE: This property is valid when use_multi_measure_records=true and mapping_mode=multi-table +# measure_name_for_multi_measure_records = "telegraf_measure" +# +# ## Specifies the name of the table to write data into +# ## NOTE: This property is valid when mapping_mode=single-table. +# # single_table_name = "" +# +# ## Specifies the name of dimension when all of the data is being stored in a single table +# ## and the measurement name is transformed into the dimension value +# ## (see Mapping data from Influx to Timestream for details) +# ## NOTE: This property is valid when mapping_mode=single-table. +# # single_table_dimension_name_for_telegraf_measurement_name = "namespace" +# +# ## Only valid and optional if create_table_if_not_exists = true +# ## Specifies the Timestream table tags. +# ## Check Timestream documentation for more details +# # create_table_tags = { "foo" = "bar", "environment" = "dev"} +# +# ## Specify the maximum number of parallel go routines to ingest/write data +# ## If not specified, defaulted to 1 go routines +# max_write_go_routines = 25 +# +# ## Please see README.md to know how line protocol data is mapped to Timestream +# ## + + +# # Write metrics to Warp 10 +# [[outputs.warp10]] +# # Prefix to add to the measurement. +# prefix = "telegraf." 
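+# # ex (illustrative alternative only, e.g. a per-environment prefix):
+# #     prefix = "telegraf.prod."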
+# +# # URL of the Warp 10 server +# warp_url = "http://localhost:8080" +# +# # Write token to access your app on warp 10 +# token = "Token" +# +# # Warp 10 query timeout +# # timeout = "15s" +# +# ## Print Warp 10 error body +# # print_error_body = false +# +# ## Max string error size +# # max_string_error_size = 511 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# [[outputs.wavefront]] +# ## Url for Wavefront API or Wavefront proxy instance. +# ## Direct Ingestion via Wavefront API requires authentication. See below. +# url = "https://metrics.wavefront.com" +# +# ## Maximum number of metrics to send per HTTP request. This value should be higher than the `metric_batch_size`. Default is 10,000. Values higher than 40,000 are not recommended. +# # http_maximum_batch_size = 10000 +# +# ## prefix for metrics keys +# # prefix = "my.specific.prefix." +# +# ## whether to use "value" for name of simple fields. default is false +# # simple_fields = false +# +# ## character to use between metric and field name. default is . (dot) +# # metric_separator = "." +# +# ## Convert metric name paths to use metricSeparator character +# ## When true will convert all _ (underscore) characters in final metric name. default is true +# # convert_paths = true +# +# ## Use Strict rules to sanitize metric and tag names from invalid characters +# ## When enabled forward slash (/) and comma (,) will be accepted +# # use_strict = false +# +# ## Use Regex to sanitize metric and tag names from invalid characters +# ## Regex is more thorough, but significantly slower. default is false +# # use_regex = false +# +# ## point tags to use as the source name for Wavefront (if none found, host will be used) +# # source_override = ["hostname", "address", "agent_host", "node_host"] +# +# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true +# # convert_bool = true +# +# ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any +# ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. +# # truncate_tags = false +# +# ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics +# ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending +# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in +# ## Telegraf. +# # immediate_flush = true +# +# ## Send internal metrics (starting with `~sdk.go`) for valid, invalid, and dropped metrics. default is true. +# # send_internal_metrics = true +# +# ## Optional TLS Config +# ## Set to true/false to enforce TLS being enabled/disabled. If not set, +# ## enable TLS only if any of the other options are specified. 
+# # tls_enable = +# ## Trusted root certificates for server +# # tls_ca = "/path/to/cafile" +# ## Used for TLS client certificate authentication +# # tls_cert = "/path/to/certfile" +# ## Used for TLS client certificate authentication +# # tls_key = "/path/to/keyfile" +# ## Send the specified TLS server name via SNI +# # tls_server_name = "kubernetes.example.com" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP Timeout +# # timeout="10s" +# +# ## Authentication for Direct Ingestion. +# ## Direct Ingestion requires one of: `token`,`auth_csp_api_token`, or `auth_csp_client_credentials` +# ## See https://docs.wavefront.com/csp_getting_started.html to learn more about using CSP credentials with Wavefront. +# ## Not required if using a Wavefront proxy. +# +# ## Wavefront API Token Authentication. Ignored if using a Wavefront proxy. +# ## 1. Click the gear icon at the top right in the Wavefront UI. +# ## 2. Click your account name (usually your email) +# ## 3. Click *API access*. +# # token = "YOUR_TOKEN" +# +# ## Optional. defaults to "https://console.cloud.vmware.com/" +# ## Ignored if using a Wavefront proxy or a Wavefront API token. +# # auth_csp_base_url=https://console.cloud.vmware.com +# +# ## CSP API Token Authentication for Wavefront. Ignored if using a Wavefront proxy. +# # auth_csp_api_token=CSP_API_TOKEN_HERE +# +# ## CSP Client Credentials Authentication Information for Wavefront. Ignored if using a Wavefront proxy. +# ## See also: https://docs.wavefront.com/csp_getting_started.html#whats-a-server-to-server-app +# # [outputs.wavefront.auth_csp_client_credentials] +# # app_id=CSP_APP_ID_HERE +# # app_secret=CSP_APP_SECRET_HERE +# # org_id=CSP_ORG_ID_HERE + + +# # A plugin that can transmit metrics over WebSocket. +# [[outputs.websocket]] +# ## URL is the address to send metrics to. Make sure ws or wss scheme is used. +# url = "ws://127.0.0.1:3000/telegraf" +# +# ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero). +# # connect_timeout = "30s" +# # write_timeout = "30s" +# # read_timeout = "30s" +# +# ## Optionally turn on using text data frames (binary by default). +# # use_text_frames = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional SOCKS5 proxy to use +# # socks5_enabled = true +# # socks5_address = "127.0.0.1:1080" +# # socks5_username = "alice" +# # socks5_password = "pass123" +# +# ## Optional HTTP proxy to use +# # use_system_proxy = false +# # http_proxy_url = "http://localhost:8888" +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Additional HTTP Upgrade headers +# # [outputs.websocket.headers] +# # Authorization = "Bearer " + + +# # Send aggregated metrics to Yandex.Cloud Monitoring +# [[outputs.yandex_cloud_monitoring]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Yandex.Cloud monitoring API endpoint. 
Normally should not be changed +# # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" +# +# ## All user metrics should be sent with "custom" service specified. Normally should not be changed +# # service = "custom" + + +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + + +# # Attach AWS EC2 metadata to metrics +# [[processors.aws_ec2]] +# ## Instance identity document tags to attach to metrics. +# ## For more information see: +# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html +# ## +# ## Available tags: +# ## * accountId +# ## * architecture +# ## * availabilityZone +# ## * billingProducts +# ## * imageId +# ## * instanceId +# ## * instanceType +# ## * kernelId +# ## * pendingTime +# ## * privateIp +# ## * ramdiskId +# ## * region +# ## * version +# imds_tags = [] +# +# ## EC2 instance tags retrieved with DescribeTags action. +# ## In case tag is empty upon retrieval it's omitted when tagging metrics. +# ## Note that in order for this to work, role attached to EC2 instance or AWS +# ## credentials available from the environment must have a policy attached, that +# ## allows ec2:DescribeTags. +# ## +# ## For more information see: +# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html +# ec2_tags = [] +# +# ## Timeout for http requests made by against aws ec2 metadata endpoint. +# timeout = "10s" +# +# ## ordered controls whether or not the metrics need to stay in the same order +# ## this plugin received them in. If false, this plugin will change the order +# ## with requests hitting cached results moving through immediately and not +# ## waiting on slower lookups. This may cause issues for you if you are +# ## depending on the order of metrics staying the same. If so, set this to true. +# ## Keeping the metrics ordered may be slightly slower. +# ordered = false +# +# ## max_parallel_calls is the maximum number of AWS API calls to be in flight +# ## at the same time. +# ## It's probably best to keep this number fairly low. +# max_parallel_calls = 10 +# +# ## cache_ttl determines how long each cached item will remain in the cache before +# ## it is removed and subsequently needs to be queried for from the AWS API. By +# ## default, no items are cached. +# # cache_ttl = "0s" +# +# ## tag_cache_size determines how many of the values which are found in imds_tags +# ## or ec2_tags will be kept in memory for faster lookup on successive processing +# ## of metrics. You may want to adjust this if you have excessively large numbers +# ## of tags on your EC2 instances, and you are using the ec2_tags field. This +# ## typically does not need to be changed when using the imds_tags field. +# # tag_cache_size = 1000 +# +# ## log_cache_stats will emit a log line periodically to stdout with details of +# ## cache entries, hits, misses, and evacuations since the last time stats were +# ## emitted. This can be helpful in determining whether caching is being effective +# ## in your environment. Stats are emitted every 30 seconds. By default, this +# ## setting is disabled. + + +# # Apply metric modifications using override semantics. 
+# [[processors.clone]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.clone.tags] +# # additional_tag = "tag_value" + + +# # Convert values to another metric value type +# [[processors.converter]] +# ## Tags to convert +# ## +# ## The table key determines the target type, and the array of key-values +# ## select the keys to convert. The array may contain globs. +# ## = [...] +# [processors.converter.tags] +# measurement = [] +# string = [] +# integer = [] +# unsigned = [] +# boolean = [] +# float = [] +# +# ## Optional tag to use as metric timestamp +# # timestamp = [] +# +# ## Format of the timestamp determined by the tag above. This can be any of +# ## "unix", "unix_ms", "unix_us", "unix_ns", or a valid Golang time format. +# ## It is required, when using the timestamp option. +# # timestamp_format = "" +# +# ## Fields to convert +# ## +# ## The table key determines the target type, and the array of key-values +# ## select the keys to convert. The array may contain globs. +# ## = [...] +# [processors.converter.fields] +# measurement = [] +# tag = [] +# string = [] +# integer = [] +# unsigned = [] +# boolean = [] +# float = [] +# +# ## Optional field to use as metric timestamp +# # timestamp = [] +# +# ## Format of the timestamp determined by the field above. This can be any +# ## of "unix", "unix_ms", "unix_us", "unix_ns", or a valid Golang time +# ## format. It is required, when using the timestamp option. +# # timestamp_format = "" + + +# # Dates measurements, tags, and fields that pass through this filter. +# [[processors.date]] +# ## New tag to create +# tag_key = "month" +# +# ## New field to create (cannot set both field_key and tag_key) +# # field_key = "month" +# +# ## Date format string, must be a representation of the Go "reference time" +# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". +# date_format = "Jan" +# +# ## If destination is a field, date format can also be one of +# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. +# # date_format = "unix" +# +# ## Offset duration added to the date string when writing the new tag. +# # date_offset = "0s" +# +# ## Timezone to use when creating the tag or field using a reference time +# ## string. This can be set to one of "UTC", "Local", or to a location name +# ## in the IANA Time Zone database. +# ## example: timezone = "America/Los_Angeles" +# # timezone = "UTC" + + +# # Filter metrics with repeating field values +# [[processors.dedup]] +# ## Maximum time to suppress output +# dedup_interval = "600s" + + +# ## Set default fields on your metric(s) when they are nil or empty +# [[processors.defaults]] +# ## Ensures a set of fields always exists on your metric(s) with their +# ## respective default value. +# ## For any given field pair (key = default), if it's not set, a field +# ## is set on the metric with the specified default. +# ## +# ## A field is considered not set if it is nil on the incoming metric; +# ## or it is not nil but its value is an empty string or is a string +# ## of one or more spaces. +# ## = +# [processors.defaults.fields] +# field_1 = "bar" +# time_idle = 0 +# is_error = true + + +# # Map enum values according to given table. +# [[processors.enum]] +# [[processors.enum.mapping]] +# ## Name of the field to map. Globs accepted. +# field = "status" +# +# ## Name of the tag to map. 
Globs accepted. +# # tag = "status" +# +# ## Destination tag or field to be used for the mapped value. By default the +# ## source tag or field is used, overwriting the original value. +# dest = "status_code" +# +# ## Default value to be used for all values not contained in the mapping +# ## table. When unset and no match is found, the original field will remain +# ## unmodified and the destination tag or field will not be created. +# # default = 0 +# +# ## Table of mappings +# [processors.enum.mapping.value_mappings] +# green = 1 +# amber = 2 +# red = 3 + + +# # Run executable as long-running processor plugin +# [[processors.execd]] +# ## One program to run as daemon. +# ## NOTE: process and each argument should each be their own string +# ## eg: command = ["/path/to/your_program", "arg1", "arg2"] +# command = ["cat"] +# +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# +# ## Delay before the process is restarted after an unexpected termination +# # restart_delay = "10s" +# +# ## Serialization format for communicating with the executed program +# ## Please note that the corresponding data-format must exist both in +# ## parsers and serializers +# # data_format = "influx" + + +# # Performs file path manipulations on tags and fields +# [[processors.filepath]] +# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag +# # [[processors.filepath.basename]] +# # tag = "path" +# # dest = "basepath" +# +# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory +# # [[processors.filepath.dirname]] +# # field = "path" +# +# ## Treat the tag value as a path, converting it to its the last element without its suffix +# # [[processors.filepath.stem]] +# # tag = "path" +# +# ## Treat the tag value as a path, converting it to the shortest path name equivalent +# ## to path by purely lexical processing +# # [[processors.filepath.clean]] +# # tag = "path" +# +# ## Treat the tag value as a path, converting it to a relative path that is lexically +# ## equivalent to the source path when joined to 'base_path' +# # [[processors.filepath.rel]] +# # tag = "path" +# # base_path = "/var/log" +# +# ## Treat the tag value as a path, replacing each separator character in path with a '/' character. Has only +# ## effect on Windows +# # [[processors.filepath.toslash]] +# # tag = "path" + + +# # Add a tag of the network interface name looked up over SNMP by interface number +# [[processors.ifname]] +# ## Name of tag holding the interface number +# # tag = "ifIndex" +# +# ## Name of output tag where service name will be added +# # dest = "ifName" +# +# ## Name of tag of the SNMP agent to request the interface name from +# # agent = "agent" +# +# ## Timeout for each request. +# # timeout = "5s" +# +# ## SNMP version; can be 1, 2, or 3. +# # version = 2 +# +# ## SNMP community string. +# # community = "public" +# +# ## Number of retries to attempt. +# # retries = 3 +# +# ## The GETBULK max-repetitions parameter. +# # max_repetitions = 10 +# +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA", or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". 
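+# ## For example, "authNoPriv" authenticates requests without encrypting them,
+# ## while "authPriv" additionally requires priv_protocol and priv_password.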
+# # sec_level = "authNoPriv" +# ## Context Name. +# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" +# +# ## max_parallel_lookups is the maximum number of SNMP requests to +# ## make at the same time. +# # max_parallel_lookups = 100 +# +# ## ordered controls whether or not the metrics need to stay in the +# ## same order this plugin received them in. If false, this plugin +# ## may change the order when data is cached. If you need metrics to +# ## stay in order set this to true. keeping the metrics ordered may +# ## be slightly slower +# # ordered = false +# +# ## cache_ttl is the amount of time interface names are cached for a +# ## given agent. After this period elapses if names are needed they +# ## will be retrieved again. +# # cache_ttl = "8h" + + +# # Lookup a key derived from metrics in a static file +# [[processors.lookup]] +# ## List of files containing the lookup-table +# files = ["path/to/lut.json", "path/to/another_lut.json"] +# +# ## Format of the lookup file(s) +# ## Available formats are: +# ## json -- JSON file with 'key: {tag-key: tag-value, ...}' mapping +# ## csv_key_name_value -- CSV file with 'key,tag-key,tag-value,...,tag-key,tag-value' mapping +# ## csv_key_values -- CSV file with a header containing tag-names and +# ## rows with 'key,tag-value,...,tag-value' mappings +# # format = "json" +# +# ## Template for generating the lookup-key from the metric. +# ## This is a Golang template (see https://pkg.go.dev/text/template) to +# ## access the metric name (`{{.Name}}`), a tag value (`{{.Tag "name"}}`) or +# ## a field value (`{{.Field "name"}}`). +# key = '{{.Tag "host"}}' + + +# # Adds noise to numerical fields +# [[processors.noise]] +# ## Specified the type of the random distribution. +# ## Can be "laplacian", "gaussian" or "uniform". +# # type = "laplacian +# +# ## Center of the distribution. +# ## Only used for Laplacian and Gaussian distributions. +# # mu = 0.0 +# +# ## Scale parameter for the Laplacian or Gaussian distribution +# # scale = 1.0 +# +# ## Upper and lower bound of the Uniform distribution +# # min = -1.0 +# # max = 1.0 +# +# ## Apply the noise only to numeric fields matching the filter criteria below. +# ## Excludes takes precedence over includes. +# # include_fields = [] +# # exclude_fields = [] + + +# # Apply metric modifications using override semantics. +# [[processors.override]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.override.tags] +# # additional_tag = "tag_value" + + +# # Parse a value in a specified field(s)/tag(s) and add the result in a new metric +# [[processors.parser]] +# ## The name of the fields whose value will be parsed. +# parse_fields = ["message"] +# +# ## The name of the tags whose value will be parsed. +# # parse_tags = [] +# +# ## If true, incoming metrics are not emitted. +# # drop_original = false +# +# ## Merge Behavior +# ## Only has effect when drop_original is set to false. Possible options +# ## include: +# ## * override: emitted metrics are merged by overriding the original metric +# ## using the newly parsed metrics, but retains the original metric +# ## timestamp. 
+# ## * override-with-timestamp: the same as "override", but the timestamp is +# ## set based on the new metrics if present. +# # merge = "" +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Rotate a single valued metric into a multi field metric +# [[processors.pivot]] +# ## Tag to use for naming the new field. +# tag_key = "name" +# ## Field to use as the value of the new field. +# value_key = "value" + + +# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file +# [[processors.port_name]] +# ## Name of tag holding the port number +# # tag = "port" +# ## Or name of the field holding the port number +# # field = "port" +# +# ## Name of output tag or field (depending on the source) where service name will be added +# # dest = "service" +# +# ## Default tcp or udp +# # default_protocol = "tcp" +# +# ## Tag containing the protocol (tcp or udp, case-insensitive) +# # protocol_tag = "proto" +# +# ## Field containing the protocol (tcp or udp, case-insensitive) +# # protocol_field = "proto" + + +# # Print all metrics that pass through this filter. +# [[processors.printer]] + + +# # Transforms tag and field values as well as measurement, tag and field names with regex pattern +# [[processors.regex]] +# namepass = ["nginx_requests"] +# +# # Tag and field conversions defined in a separate sub-tables +# [[processors.regex.tags]] +# ## Tag to change, "*" will change every tag +# key = "resp_code" +# ## Regular expression to match on a tag value +# pattern = "^(\\d)\\d\\d$" +# ## Matches of the pattern will be replaced with this string. Use ${1} +# ## notation to use the text of the first submatch. +# replacement = "${1}xx" +# +# [[processors.regex.fields]] +# ## Field to change +# key = "request" +# ## All the power of the Go regular expressions available here +# ## For example, named subgroups +# pattern = "^/api(?P/[\\w/]+)\\S*" +# replacement = "${method}" +# ## If result_key is present, a new field will be created +# ## instead of changing existing field +# result_key = "method" +# +# # Multiple conversions may be applied for one field sequentially +# # Let's extract one more value +# [[processors.regex.fields]] +# key = "request" +# pattern = ".*category=(\\w+).*" +# replacement = "${1}" +# result_key = "search_category" +# +# # Rename metric fields +# [[processors.regex.field_rename]] +# ## Regular expression to match on a field name +# pattern = "^search_(\\w+)d$" +# ## Matches of the pattern will be replaced with this string. Use ${1} +# ## notation to use the text of the first submatch. +# replacement = "${1}" +# ## If the new field name already exists, you can either "overwrite" the +# ## existing one with the value of the renamed field OR you can "keep" +# ## both the existing and source field. +# # result_key = "keep" +# +# # Rename metric tags +# # [[processors.regex.tag_rename]] +# # ## Regular expression to match on a tag name +# # pattern = "^search_(\\w+)d$" +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. +# # replacement = "${1}" +# # ## If the new tag name already exists, you can either "overwrite" the +# # ## existing one with the value of the renamed tag OR you can "keep" +# # ## both the existing and source tag. 
+# # # result_key = "keep"
+#
+# # Rename metrics
+# # [[processors.regex.metric_rename]]
+# # ## Regular expression to match on a metric name
+# # pattern = "^search_(\\w+)d$"
+# # ## Matches of the pattern will be replaced with this string. Use ${1}
+# # ## notation to use the text of the first submatch.
+# # replacement = "${1}"
+
+
+# # Rename measurements, tags, and fields that pass through this filter.
+# [[processors.rename]]
+# ## Specify one sub-table per rename operation.
+# [[processors.rename.replace]]
+# measurement = "network_interface_throughput"
+# dest = "throughput"
+#
+# [[processors.rename.replace]]
+# tag = "hostname"
+# dest = "host"
+#
+# [[processors.rename.replace]]
+# field = "lower"
+# dest = "min"
+#
+# [[processors.rename.replace]]
+# field = "upper"
+# dest = "max"
+
+
+# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
+# [[processors.reverse_dns]]
+# ## For optimal performance, you may want to limit which metrics are passed to this
+# ## processor. e.g.:
+# ## namepass = ["my_metric_*"]
+#
+# ## cache_ttl is how long the dns entries should stay cached for.
+# ## generally longer is better, but if you expect a large number of diverse lookups
+# ## you'll want to consider memory use.
+# cache_ttl = "24h"
+#
+# ## lookup_timeout is how long you should wait for a single dns request to respond.
+# ## this is also the maximum acceptable latency for a metric travelling through
+# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
+# ## be passed on unaltered.
+# ## multiple simultaneous resolution requests for the same IP will only make a
+# ## single rDNS request, and they will all wait for the answer for this long.
+# lookup_timeout = "3s"
+#
+# ## max_parallel_lookups is the maximum number of dns requests to be in flight
+# ## at the same time. Requests hitting cached values do not count against this
+# ## total, and neither do multiple requests for the same IP.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_lookups = 10
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## Keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the field "source_ip", and put the result in the field "source_name"
+# field = "source_ip"
+# dest = "source_name"
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the tag "destination_ip", and put the result in the tag
+# ## "destination_name".
+# tag = "destination_ip"
+# dest = "destination_name"
+#
+# ## If you would prefer destination_name to be a field instead, you can use a
+# ## processors.converter after this one, specifying the order attribute.
+
+
+# # Add the S2 Cell ID as a tag based on latitude and longitude fields
+# [[processors.s2geo]]
+# ## The name of the lat and lon fields containing WGS-84 latitude and
+# ## longitude in decimal degrees.
+# # lat_field = "lat" +# # lon_field = "lon" +# +# ## New tag to create +# # tag_key = "s2_cell_id" +# +# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html) +# # cell_level = 9 + + +# # Scale values with a predefined range to a different output range. +# [[processors.scale]] +# ## It is possible to define multiple different scaling that can be applied +# ## do different sets of fields. Each scaling expects the following +# ## arguments: +# ## - input_minimum: Minimum expected input value +# ## - input_maximum: Maximum expected input value +# ## - output_minimum: Minimum desired output value +# ## - output_maximum: Maximum desired output value +# ## alternatively you can specify a scaling with factor and offset +# ## - factor: factor to scale the input value with +# ## - offset: additive offset for value after scaling +# ## - fields: a list of field names (or filters) to apply this scaling to +# +# ## Example: Scaling with minimum and maximum values +# # [processors.scale.scaling] +# # input_minimum = 0 +# # input_maximum = 1 +# # output_minimum = 0 +# # output_maximum = 100 +# # fields = ["temperature1", "temperature2"] +# +# ## Example: Scaling with factor and offset +# # [processors.scale.scaling] +# # factor = 10.0 +# # offset = -5.0 +# # fields = ["voltage*"] + + +# # Split a metric into one or more metrics with the specified field(s)/tag(s) +# [[processors.split]] +# ## Keeps the original metric by default +# # drop_original = false +# +# ## Template for an output metric +# ## Users can define multiple templates to split the original metric into +# ## multiple, potentially overlapping, metrics. +# [[processors.split.template]] +# ## New metric name +# name = "" +# +# ## List of tag keys for this metric template, accepts globs, e.g. "*" +# tags = [] +# +# ## List of field keys for this metric template, accepts globs, e.g. "*" +# fields = [] + + +# # Process metrics using a Starlark script +# [[processors.starlark]] +# ## The Starlark source can be set as a string in this configuration file, or +# ## by referencing a file containing the script. Only one source or script +# ## should be set at once. +# +# ## Source of the Starlark script. +# source = ''' +# def apply(metric): +# return metric +# ''' +# +# ## File containing a Starlark script. +# # script = "/usr/local/bin/myscript.star" +# +# ## The constants of the Starlark script. 
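+# ## For example (illustrative note): each key defined below, such as max_size
+# ## or threshold, is exposed to the script as a variable of the same name.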
+# # [processors.starlark.constants] +# # max_size = 10 +# # threshold = 0.75 +# # default_name = "Julia" +# # debug_mode = true + + +# # Perform string processing on tags, fields, and measurements +# [[processors.strings]] +# ## Convert a field value to lowercase and store in a new field +# # [[processors.strings.lowercase]] +# # field = "uri_stem" +# # dest = "uri_stem_normalised" +# +# ## Convert a tag value to uppercase +# # [[processors.strings.uppercase]] +# # tag = "method" +# +# ## Convert a field value to titlecase +# # [[processors.strings.titlecase]] +# # field = "status" +# +# ## Trim leading and trailing whitespace using the default cutset +# # [[processors.strings.trim]] +# # field = "message" +# +# ## Trim leading characters in cutset +# # [[processors.strings.trim_left]] +# # field = "message" +# # cutset = "\t" +# +# ## Trim trailing characters in cutset +# # [[processors.strings.trim_right]] +# # field = "message" +# # cutset = "\r\n" +# +# ## Trim the given prefix from the field +# # [[processors.strings.trim_prefix]] +# # field = "my_value" +# # prefix = "my_" +# +# ## Trim the given suffix from the field +# # [[processors.strings.trim_suffix]] +# # field = "read_count" +# # suffix = "_count" +# +# ## Replace all non-overlapping instances of old with new +# # [[processors.strings.replace]] +# # measurement = "*" +# # old = ":" +# # new = "_" +# +# ## Trims strings based on width +# # [[processors.strings.left]] +# # field = "message" +# # width = 10 +# +# ## Decode a base64 encoded utf-8 string +# # [[processors.strings.base64decode]] +# # field = "message" +# +# ## Sanitize a string to ensure it is a valid utf-8 string +# ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty +# # [[processors.strings.valid_utf8]] +# # field = "message" +# # replacement = "" + + +# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit. +# [[processors.tag_limit]] +# ## Maximum number of tags to preserve +# limit = 3 +# +# ## List of tags to preferentially preserve +# keep = ["environment", "region"] + + +# # Uses a Go template to create a new tag +# [[processors.template]] +# ## Go template used to create the tag name of the output. In order to +# ## ease TOML escaping requirements, you should use single quotes around +# ## the template string. +# tag = "topic" +# +# ## Go template used to create the tag value of the output. In order to +# ## ease TOML escaping requirements, you should use single quotes around +# ## the template string. +# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' + + +# # Print all metrics that pass through this filter. +# [[processors.topk]] +# ## How many seconds between aggregations +# # period = 10 +# +# ## How many top buckets to return per field +# ## Every field specified to aggregate over will return k number of results. +# ## For example, 1 field with k of 10 will return 10 buckets. While 2 fields +# ## with k of 3 will return 6 buckets. +# # k = 10 +# +# ## Over which tags should the aggregation be done. Globs can be specified, in +# ## which case any tag matching the glob will aggregated over. If set to an +# ## empty list is no aggregation over tags is done +# # group_by = ['*'] +# +# ## The field(s) to aggregate +# ## Each field defined is used to create an independent aggregation. Each +# ## aggregation will return k buckets. If a metric does not have a defined +# ## field the metric will be dropped from the aggregation. 
Considering using +# ## the defaults processor plugin to ensure fields are set if required. +# # fields = ["value"] +# +# ## What aggregation function to use. Options: sum, mean, min, max +# # aggregation = "mean" +# +# ## Instead of the top k largest metrics, return the bottom k lowest metrics +# # bottomk = false +# +# ## The plugin assigns each metric a GroupBy tag generated from its name and +# ## tags. If this setting is different than "" the plugin will add a +# ## tag (which name will be the value of this setting) to each metric with +# ## the value of the calculated GroupBy tag. Useful for debugging +# # add_groupby_tag = "" +# +# ## These settings provide a way to know the position of each metric in +# ## the top k. The 'add_rank_field' setting allows to specify for which +# ## fields the position is required. If the list is non empty, then a field +# ## will be added to each and every metric for each string present in this +# ## setting. This field will contain the ranking of the group that +# ## the metric belonged to when aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_rank' +# # add_rank_fields = [] +# +# ## These settings provide a way to know what values the plugin is generating +# ## when aggregating metrics. The 'add_aggregate_field' setting allows to +# ## specify for which fields the final aggregation value is required. If the +# ## list is non empty, then a field will be added to each every metric for +# ## each field present in this setting. This field will contain +# ## the computed aggregation for the group that the metric belonged to when +# ## aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_aggregate' +# # add_aggregate_fields = [] + + +# # Rotate multi field metric into several single field metrics +# [[processors.unpivot]] +# ## Metric mode to pivot to +# ## Set to "tag", metrics are pivoted as a tag and the metric is kept as +# ## the original measurement name. Tag key name is set by tag_key value. +# ## Set to "metric" creates a new metric named the field name. With this +# ## option the tag_key is ignored. Be aware that this could lead to metric +# ## name conflicts! +# # use_fieldname_as = "tag" +# +# ## Tag to use for the name. +# # tag_key = "name" +# +# ## Field to use for the name of the value. +# # value_key = "value" + + +############################################################################### +# AGGREGATOR PLUGINS # +############################################################################### + + +# # Keep the aggregate basicstats of each metric passing through. +# [[aggregators.basicstats]] +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## Configures which basic stats to push as fields +# # stats = ["count","diff","rate","min","max","mean","non_negative_diff","non_negative_rate","percent_change","stdev","s2","sum","interval"] + + +# # Calculates a derivative for every field. +# [[aggregators.derivative]] +# ## The period in which to flush the aggregator. +# period = "30s" +# ## +# ## Suffix to append for the resulting derivative field. +# # suffix = "_rate" +# ## +# ## Field to use for the quotient when computing the derivative. 
+# ## When using a field as the derivation parameter the name of that field will +# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*. +# ## By default the timestamps of the metrics are used and the suffix is omitted. +# # variable = "" +# ## +# ## Maximum number of roll-overs in case only one measurement is found during a period. +# # max_roll_over = 10 + + +# # Report the final metric of a series +# [[aggregators.final]] +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## The time that a series is not updated until considering it final. +# series_timeout = "5m" + + +# # Configuration for aggregate histogram metrics +# [[aggregators.histogram]] +# ## The period in which to flush the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## If true, the histogram will be reset on flush instead +# ## of accumulating the results. +# reset = false +# +# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. +# ## Defaults to true. +# cumulative = true +# +# ## Expiration interval for each histogram. The histogram will be expired if +# ## there are no changes in any buckets for this time interval. 0 == no expiration. +# # expiration_interval = "0m" +# +# ## If true, aggregated histogram are pushed to output only if it was updated since +# ## previous push. Defaults to false. +# # push_only_on_update = false +# +# ## Example config that aggregates all fields of the metric. +# # [[aggregators.histogram.config]] +# # ## Right borders of buckets (with +Inf implicitly added). +# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] +# # ## The name of metric. +# # measurement_name = "cpu" +# +# ## Example config that aggregates only specific fields of the metric. +# # [[aggregators.histogram.config]] +# # ## Right borders of buckets (with +Inf implicitly added). +# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] +# # ## The name of metric. +# # measurement_name = "diskio" +# # ## The concrete fields of metric +# # fields = ["io_time", "read_time", "write_time"] + + +# # Merge metrics into multifield metrics by series key +# [[aggregators.merge]] +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = true + + +# # Keep the aggregate min/max of each metric passing through. +# [[aggregators.minmax]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false + + +# # Keep the aggregate quantiles of each metric passing through. +# [[aggregators.quantile]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. 
+# drop_original = false +# +# ## Quantiles to output in the range [0,1] +# # quantiles = [0.25, 0.5, 0.75] +# +# ## Type of aggregation algorithm +# ## Supported are: +# ## "t-digest" -- approximation using centroids, can cope with large number of samples +# ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7) +# ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8) +# ## NOTE: Do not use "exact" algorithms with large number of samples +# ## to not impair performance or memory consumption! +# # algorithm = "t-digest" +# +# ## Compression for approximation (t-digest). The value needs to be +# ## greater or equal to 1.0. Smaller values will result in more +# ## performance but less accuracy. +# # compression = 100.0 + + +# # Aggregate metrics using a Starlark script +# [[aggregators.starlark]] +# ## The Starlark source can be set as a string in this configuration file, or +# ## by referencing a file containing the script. Only one source or script +# ## should be set at once. +# ## +# ## Source of the Starlark script. +# source = ''' +# state = {} +# +# def add(metric): +# state["last"] = metric +# +# def push(): +# return state.get("last") +# +# def reset(): +# state.clear() +# ''' +# +# ## File containing a Starlark script. +# # script = "/usr/local/bin/myscript.star" +# +# ## The constants of the Starlark script. +# # [aggregators.starlark.constants] +# # max_size = 10 +# # threshold = 0.75 +# # default_name = "Julia" +# # debug_mode = true + + +# # Count the occurrence of values in fields. +# [[aggregators.valuecounter]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# ## The fields for which the values will be counted +# fields = ["status"] + + +############################################################################### +# INPUT PLUGINS # +############################################################################### + + +# Read metrics about cpu usage +[[inputs.cpu]] + ## Whether to report per-cpu stats or not + percpu = true + ## Whether to report total system cpu stats or not + totalcpu = true + ## If true, collect raw CPU time metrics + collect_cpu_time = false + ## If true, compute and report the sum of all non-idle CPU states + report_active = false + ## If true and the info is available then add core_id and physical_id tags + core_tags = false + + +# Read metrics about disk usage by mount point +[[inputs.disk]] + ## By default stats will be gathered for all mount points. + ## Set mount_points will restrict the stats to only the specified mount points. + # mount_points = ["/"] + + ## Ignore mount points by filesystem type. + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] + + ## Ignore mount points by mount options. + ## The 'mount' command reports options of all mounts in parathesis. + ## Bind mounts can be ignored with the special 'bind' option. + # ignore_mount_opts = [] + + +# Read metrics about disk IO by device +[[inputs.diskio]] + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. + ## NOTE: Globbing expressions (e.g. asterix) are not supported for + ## disk synonyms like '/dev/disk/by-id'. 
+ # devices = ["sda", "sdb", "vd*", "/dev/disk/by-id/nvme-eui.00123deadc0de123"] + ## Uncomment the following line if you need disk serial numbers. + # skip_serial_number = false + # + ## On systems which support it, device metadata can be added in the form of + ## tags. + ## Currently only Linux is supported via udev properties. You can view + ## available properties for a device by running: + ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. + # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] + # + ## Using the same metadata source as device_tags, you can also customize the + ## name of the device via templates. + ## The 'name_templates' parameter is a list of templates to try and apply to + ## the device. The template may contain variables in the form of '$PROPERTY' or + ## '${PROPERTY}'. The first template which does not contain any variables not + ## present for the device is used as the device name tag. + ## The typical use case is for LVM volumes, to get the VG/LV name instead of + ## the near-meaningless DM-0 name. + # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] + + +# Plugin to collect various Linux kernel statistics. +# This plugin ONLY supports Linux +[[inputs.kernel]] + ## Additional gather options + ## Possible options include: + ## * ksm - kernel same-page merging + # collect = [] + + +# Read metrics about memory usage +[[inputs.mem]] + # no configuration + + +# Get the number of processes and group them by status +# This plugin ONLY supports non-Windows +[[inputs.processes]] + ## Use sudo to run ps command on *BSD systems. Linux systems will read + ## /proc, so this does not apply there. + # use_sudo = false + + +# Read metrics about swap memory usage +[[inputs.swap]] + # no configuration + + +# Read metrics about system load & uptime +[[inputs.system]] + # no configuration + + +# # Gather ActiveMQ metrics +# [[inputs.activemq]] +# ## ActiveMQ WebConsole URL +# url = "http://127.0.0.1:8161" +# +# ## Required ActiveMQ Endpoint +# ## deprecated in 1.11; use the url option +# # server = "192.168.50.10" +# # port = 8161 +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Required ActiveMQ webadmin root path +# # webadmin = "admin" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read stats from aerospike server(s) +# [[inputs.aerospike]] +# ## Aerospike servers to connect to (with port) +# ## This plugin will query all namespaces the aerospike +# ## server has configured and get stats for them. 
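+# ## For example, several nodes can be listed (hostnames below are placeholders):
+# ## servers = ["aerospike-1.example.com:3000", "aerospike-2.example.com:3000"]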
+# servers = ["localhost:3000"] +# +# # username = "telegraf" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# # tls_name = "tlsname" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# # Feature Options +# # Add namespace variable to limit the namespaces executed on +# # Leave blank to do all +# # disable_query_namespaces = true # default false +# # namespaces = ["namespace1", "namespace2"] +# +# # Enable set level telemetry +# # query_sets = true # default: false +# # Add namespace set combinations to limit sets executed on +# # Leave blank to do all sets +# # sets = ["namespace1/set1", "namespace1/set2", "namespace3"] +# +# # Histograms +# # enable_ttl_histogram = true # default: false +# # enable_object_size_linear_histogram = true # default: false +# +# # by default, aerospike produces a 100 bucket histogram +# # this is not great for most graphing tools, this will allow +# # the ability to squash this to a smaller number of buckets +# # To have a balanced histogram, the number of buckets chosen +# # should divide evenly into 100. +# # num_histogram_buckets = 100 # default: 10 + + +# # Query statistics from AMD Graphics cards using rocm-smi binary +# [[inputs.amd_rocm_smi]] +# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/opt/rocm/bin/rocm-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + +# # Read Apache status information (mod_status) +# [[inputs.apache]] +# ## An array of URLs to gather from, must be directed at the machine +# ## readable version of the mod_status page including the auto query string. +# ## Default is "http://localhost/server-status?auto". +# urls = ["http://localhost/server-status?auto"] +# +# ## Credentials for basic HTTP authentication. +# # username = "myuser" +# # password = "mypassword" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Monitor APC UPSes connected to apcupsd +# [[inputs.apcupsd]] +# # A list of running apcupsd server to connect to. +# # If not provided will default to tcp://127.0.0.1:3551 +# servers = ["tcp://127.0.0.1:3551"] +# +# ## Timeout for dialing server. +# timeout = "5s" + + +# # Gather metrics from Apache Aurora schedulers +# [[inputs.aurora]] +# ## Schedulers are the base addresses of your Aurora Schedulers +# schedulers = ["http://127.0.0.1:8081"] +# +# ## Set of role types to collect metrics from. +# ## +# ## The scheduler roles are checked each interval by contacting the +# ## scheduler nodes; zookeeper is not contacted. +# # roles = ["leader", "follower"] +# +# ## Timeout is the max time for total network operations. +# # timeout = "5s" +# +# ## Username and password are sent using HTTP Basic Auth. 
+# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather Azure resources metrics from Azure Monitor API +# [[inputs.azure_monitor]] +# # can be found under Overview->Essentials in the Azure portal for your application/service +# subscription_id = "<>" +# # can be obtained by registering an application under Azure Active Directory +# client_id = "<>" +# # can be obtained by registering an application under Azure Active Directory +# client_secret = "<>" +# # can be found under Azure Active Directory->Properties +# tenant_id = "<>" +# +# # resource target #1 to collect metrics from +# [[inputs.azure_monitor.resource_target]] +# # can be found undet Overview->Essentials->JSON View in the Azure portal for your application/service +# # must start with 'resourceGroups/...' ('/subscriptions/xxxxxxxx-xxxx-xxxx-xxx-xxxxxxxxxxxx' +# # must be removed from the beginning of Resource ID property value) +# resource_id = "<>" +# # the metric names to collect +# # leave the array empty to use all metrics available to this resource +# metrics = [ "<>", "<>" ] +# # metrics aggregation type value to collect +# # can be 'Total', 'Count', 'Average', 'Minimum', 'Maximum' +# # leave the array empty to collect all aggregation types values for each metric +# aggregations = [ "<>", "<>" ] +# +# # resource target #2 to collect metrics from +# [[inputs.azure_monitor.resource_target]] +# resource_id = "<>" +# metrics = [ "<>", "<>" ] +# aggregations = [ "<>", "<>" ] +# +# # resource group target #1 to collect metrics from resources under it with resource type +# [[inputs.azure_monitor.resource_group_target]] +# # the resource group name +# resource_group = "<>" +# +# # defines the resources to collect metrics from +# [[inputs.azure_monitor.resource_group_target.resource]] +# # the resource type +# resource_type = "<>" +# metrics = [ "<>", "<>" ] +# aggregations = [ "<>", "<>" ] +# +# # defines the resources to collect metrics from +# [[inputs.azure_monitor.resource_group_target.resource]] +# resource_type = "<>" +# metrics = [ "<>", "<>" ] +# aggregations = [ "<>", "<>" ] +# +# # resource group target #2 to collect metrics from resources under it with resource type +# [[inputs.azure_monitor.resource_group_target]] +# resource_group = "<>" +# +# [[inputs.azure_monitor.resource_group_target.resource]] +# resource_type = "<>" +# metrics = [ "<>", "<>" ] +# aggregations = [ "<>", "<>" ] +# +# # subscription target #1 to collect metrics from resources under it with resource type +# [[inputs.azure_monitor.subscription_target]] +# resource_type = "<>" +# metrics = [ "<>", "<>" ] +# aggregations = [ "<>", "<>" ] +# +# # subscription target #2 to collect metrics from resources under it with resource type +# [[inputs.azure_monitor.subscription_target]] +# resource_type = "<>" +# metrics = [ "<>", "<>" ] +# aggregations = [ "<>", "<>" ] + + +# # Gather Azure Storage Queue metrics +# [[inputs.azure_storage_queue]] +# ## Required Azure Storage Account name +# account_name = "mystorageaccount" +# +# ## Required Azure Storage Account access key +# account_key = "storageaccountaccesskey" +# +# ## Set to false to disable peeking age of oldest message (executes faster) +# # peek_oldest_message_age = true + + +# # Read metrics of bcache from stats_total and dirty_data +# # This plugin ONLY supports 
Linux +# [[inputs.bcache]] +# ## Bcache sets path +# ## If not specified, then default is: +# bcachePath = "/sys/fs/bcache" +# +# ## By default, Telegraf gather stats for all bcache devices +# ## Setting devices will restrict the stats to the specified +# ## bcache devices. +# bcacheDevs = ["bcache0"] + + +# # Collects Beanstalkd server and tubes stats +# [[inputs.beanstalkd]] +# ## Server to collect data from +# server = "localhost:11300" +# +# ## List of tubes to gather stats about. +# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command +# tubes = ["notifications"] + + +# # Read metrics exposed by Beat +# [[inputs.beat]] +# ## An URL from which to read Beat-formatted JSON +# ## Default is "http://127.0.0.1:5066". +# url = "http://127.0.0.1:5066" +# +# ## Enable collection of the listed stats +# ## An empty list means collect all. Available options are currently +# ## "beat", "libbeat", "system" and "filebeat". +# # include = ["beat", "libbeat", "filebeat"] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "logstash.example.com" +# +# ## Timeout for HTTP requests +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read BIND nameserver XML statistics +# [[inputs.bind]] +# ## An array of BIND XML statistics URI to gather stats. +# ## Default is "http://localhost:8053/xml/v3". +# # urls = ["http://localhost:8053/xml/v3"] +# # gather_memory_contexts = false +# # gather_views = false +# +# ## Timeout for http requests made by bind nameserver +# # timeout = "4s" + + +# # Collect bond interface status, slaves statuses and failures count +# [[inputs.bond]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" +# +# ## Sets 'sys' directory path +# ## If not specified, then default is /sys +# # host_sys = "/sys" +# +# ## By default, telegraf gather stats for all bond interfaces +# ## Setting interfaces will restrict the stats to the specified +# ## bond interfaces. +# # bond_interfaces = ["bond0"] +# +# ## Tries to collect additional bond details from /sys/class/net/{bond} +# ## currently only useful for LACP (mode 4) bonds +# # collect_sys_details = false + + +# # Collect Kafka topics and consumers status from Burrow HTTP API. +# [[inputs.burrow]] +# ## Burrow API endpoints in format "schema://host:port". +# ## Default is "http://localhost:8000". +# servers = ["http://localhost:8000"] +# +# ## Override Burrow API prefix. +# ## Useful when Burrow is behind reverse-proxy. +# # api_prefix = "/v3/kafka" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Limit per-server concurrent connections. +# ## Useful in case of large number of topics or consumer groups. +# # concurrent_connections = 20 +# +# ## Filter clusters, default is no filtering. +# ## Values can be specified as glob patterns. +# # clusters_include = [] +# # clusters_exclude = [] +# +# ## Filter consumer groups, default is no filtering. +# ## Values can be specified as glob patterns. +# # groups_include = [] +# # groups_exclude = [] +# +# ## Filter topics, default is no filtering. 
+# ## Values can be specified as glob patterns. +# # topics_include = [] +# # topics_exclude = [] +# +# ## Credentials for basic HTTP authentication. +# # username = "" +# # password = "" +# +# ## Optional SSL config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# # insecure_skip_verify = false + + +# # Collects performance metrics from the MON, OSD, MDS and RGW nodes +# # in a Ceph storage cluster. +# [[inputs.ceph]] +# ## This is the recommended interval to poll. Too frequent and you +# ## will lose data points due to timeouts during rebalancing and recovery +# interval = '1m' +# +# ## All configuration values are optional, defaults are shown below +# +# ## location of ceph binary +# ceph_binary = "/usr/bin/ceph" +# +# ## directory in which to look for socket files +# socket_dir = "/var/run/ceph" +# +# ## prefix of MON and OSD socket files, used to determine socket type +# mon_prefix = "ceph-mon" +# osd_prefix = "ceph-osd" +# mds_prefix = "ceph-mds" +# rgw_prefix = "ceph-client" +# +# ## suffix used to identify socket files +# socket_suffix = "asok" +# +# ## Ceph user to authenticate as, ceph will search for the corresponding +# ## keyring e.g. client.admin.keyring in /etc/ceph, or the explicit path +# ## defined in the client section of ceph.conf for example: +# ## +# ## [client.telegraf] +# ## keyring = /etc/ceph/client.telegraf.keyring +# ## +# ## Consult the ceph documentation for more detail on keyring generation. +# ceph_user = "client.admin" +# +# ## Ceph configuration to use to locate the cluster +# ceph_config = "/etc/ceph/ceph.conf" +# +# ## Whether to gather statistics via the admin socket +# gather_admin_socket_stats = true +# +# ## Whether to gather statistics via ceph commands, requires ceph_user +# ## and ceph_config to be specified +# gather_cluster_stats = false + + +# # Read specific statistics per cgroup +# # This plugin ONLY supports Linux +# [[inputs.cgroup]] +# ## Directories in which to look for files, globs are supported. +# ## Consider restricting paths to the set of cgroups you really +# ## want to monitor if you have a large number of cgroups, to avoid +# ## any cardinality issues. +# # paths = [ +# # "/sys/fs/cgroup/memory", +# # "/sys/fs/cgroup/memory/child1", +# # "/sys/fs/cgroup/memory/child2/*", +# # ] +# ## cgroup stat fields, as file names, globs are supported. +# ## these file names are appended to each path from above. +# # files = ["memory.*usage*", "memory.limit_in_bytes"] + + +# # Get standard chrony metrics, requires chronyc executable. +# [[inputs.chrony]] +# ## If true, chronyc tries to perform a DNS lookup for the time server. 
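+# ## For example (illustrative note): leaving this false typically reports the
+# ## time server as a numeric IP address rather than a resolved hostname.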
+# # dns_lookup = false + + +# # Pull Metric Statistics from Amazon CloudWatch +# [[inputs.cloudwatch]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and +# ## web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## If you are using CloudWatch cross-account observability, you can +# ## set IncludeLinkedAccounts to true in a monitoring account +# ## and collect metrics from the linked source accounts +# # include_linked_accounts = false +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Set http_proxy +# # use_system_proxy = false +# # http_proxy_url = "http://localhost:8888" +# +# ## The minimum period for Cloudwatch metrics is 1 minute (60s). However not +# ## all metrics are made available to the 1 minute period. Some are collected +# ## at 3 minute, 5 minute, or larger intervals. +# ## See https://aws.amazon.com/cloudwatch/faqs/#monitoring. +# ## Note that if a period is configured that is smaller than the minimum for a +# ## particular metric, that metric will not be returned by the Cloudwatch API +# ## and will not be collected by Telegraf. +# # +# ## Requested CloudWatch aggregation Period (required) +# ## Must be a multiple of 60s. +# period = "5m" +# +# ## Collection Delay (required) +# ## Must account for metrics availability via CloudWatch API +# delay = "5m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Recommended if "delay" and "period" are both within 3 hours of request +# ## time. Invalid values will be ignored. Recently Active feature will only +# ## poll for CloudWatch ListMetrics values that occurred within the last 3h. +# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics +# ## API and require less memory to retain. +# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will +# ## not return data more than 3 hours old. +# ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html +# #recently_active = "PT3H" +# +# ## Configure the TTL for the internal cache of metrics. +# # cache_ttl = "1h" +# +# ## Metric Statistic Namespaces (required) +# namespaces = ["AWS/ELB"] +# +# ## Maximum requests per second. Note that the global default AWS rate limit +# ## is 50 reqs/sec, so if you define multiple namespaces, these should add up +# ## to a maximum of 50. +# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html +# # ratelimit = 25 +# +# ## Timeout for http requests made by the cloudwatch client. +# # timeout = "5s" +# +# ## Batch Size +# ## The size of each batch to send requests to Cloudwatch. 500 is the +# ## suggested largest size. 
If a request gets too large (413 errors), consider
+# ## reducing this amount.
+# # batch_size = 500
+#
+# ## Namespace-wide statistic filters. These allow fewer queries to be made to
+# ## cloudwatch.
+# # statistic_include = ["average", "sum", "minimum", "maximum", "sample_count"]
+# # statistic_exclude = []
+#
+# ## Metrics to Pull
+# ## Defaults to all Metrics in Namespace if nothing is provided
+# ## Refreshes Namespace available metrics every 1h
+# #[[inputs.cloudwatch.metrics]]
+# # names = ["Latency", "RequestCount"]
+# #
+# # ## Statistic filters for Metric. These allow for retrieving specific
+# # ## statistics for an individual metric.
+# # # statistic_include = ["average", "sum", "minimum", "maximum", "sample_count"]
+# # # statistic_exclude = []
+# #
+# # ## Dimension filters for Metric.
+# # ## All dimensions defined for the metric names must be specified in order
+# # ## to retrieve the metric statistics.
+# # ## 'value' has wildcard / 'glob' matching support such as 'p-*'.
+# # [[inputs.cloudwatch.metrics.dimensions]]
+# # name = "LoadBalancerName"
+# # value = "p-example"
+
+
+# # Collects conntrack stats from the configured directories and files.
+# # This plugin ONLY supports Linux
+# [[inputs.conntrack]]
+# ## The following defaults would work with multiple versions of conntrack.
+# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
+# ## kernel versions, as are the directory locations.
+#
+# ## Look through /proc/net/stat/nf_conntrack for these metrics
+# ## all - aggregated statistics
+# ## percpu - include detailed statistics with cpu tag
+# collect = ["all", "percpu"]
+#
+# ## User-specified directories and files to look through
+# ## Directories to search within for the conntrack files above.
+# ## Missing directories will be ignored.
+# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
+#
+# ## Superset of filenames to look for within the conntrack dirs.
+# ## Missing files will be ignored.
+# files = ["ip_conntrack_count","ip_conntrack_max",
+# "nf_conntrack_count","nf_conntrack_max"]
+
+
+# # Gather health check statuses from services registered in Consul
+# [[inputs.consul]]
+# ## Consul server address
+# # address = "localhost:8500"
+#
+# ## URI scheme for the Consul server, one of "http", "https"
+# # scheme = "http"
+#
+# ## Metric version controls the mapping from Consul metrics into
+# ## Telegraf metrics. Version 2 moved all fields with string values
+# ## to tags.
+# ##
+# ## example: metric_version = 1; deprecated in 1.16
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
+# ## ACL token used in every request
+# # token = ""
+#
+# ## HTTP Basic Authentication username and password.
+# # username = ""
+# # password = ""
+#
+# ## Data center to query the health checks from
+# # datacenter = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+#
+# ## Consul checks' tag splitting
+# # When tags are formatted like "key:value" with ":" as a delimiter then
+# # they will be split and reported as proper key:value in Telegraf
+# # tag_delimiter = ":"
+
+
+# # Read metrics from the Consul Agent API
+# [[inputs.consul_agent]]
+# ## URL for the Consul agent
+# # url = "http://127.0.0.1:8500"
+#
+# ## Use auth token for authorization.
+# ## If both are set, an error is thrown.
+# ## If both are empty, no token will be used.
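+# ## For example (illustrative), point 'token_file' at a file that contains
+# ## only the ACL token string, or set 'token' directly below - not both.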
+# # token_file = "/path/to/auth/token" +# ## OR +# # token = "a1234567-40c7-9048-7bae-378687048181" +# +# ## Set timeout (default 5 seconds) +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + +# # Read per-node and per-bucket metrics from Couchbase +# [[inputs.couchbase]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## http://couchbase-0.example.com/ +# ## http://admin:secret@couchbase-0.example.com:8091/ +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no protocol is specified, HTTP is used. +# ## If no port is specified, 8091 is used. +# servers = ["http://localhost:8091"] +# +# ## Filter bucket fields to include only here. +# # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification (defaults to false) +# ## If set to false, tls_cert and tls_key are required +# # insecure_skip_verify = false +# +# ## Whether to collect cluster-wide bucket statistics +# ## It is recommended to disable this in favor of node_stats +# ## to get a better view of the cluster. +# # cluster_bucket_stats = true +# +# ## Whether to collect bucket stats for each individual node +# # node_bucket_stats = false +# +# ## List of additional stats to collect, choose from: +# ## * autofailover +# # additional_stats = [] + + +# # Read CouchDB Stats from one or more servers +# [[inputs.couchdb]] +# ## Works with CouchDB stats endpoints out of the box +# ## Multiple Hosts from which to read CouchDB stats: +# hosts = ["http://localhost:8086/_stats"] +# +# ## Use HTTP Basic Authentication. +# # basic_username = "telegraf" +# # basic_password = "p@ssw0rd" + + +# # Fetch metrics from a CSGO SRCDS +# [[inputs.csgo]] +# ## Specify servers using the following format: +# ## servers = [ +# ## ["ip1:port1", "rcon_password1"], +# ## ["ip2:port2", "rcon_password2"], +# ## ] +# # +# ## If no servers are specified, no data will be collected +# servers = [] + + +# # Input plugin for DC/OS metrics +# [[inputs.dcos]] +# ## The DC/OS cluster URL. +# cluster_url = "https://dcos-master-1" +# +# ## The ID of the service account. +# service_account_id = "telegraf" +# ## The private key file for the service account. +# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem" +# +# ## Path containing login token. If set, will read on every gather. +# # token_file = "/home/dcos/.dcos/token" +# +# ## In all filter options if both include and exclude are empty all items +# ## will be collected. Arrays may contain glob patterns. +# ## +# ## Node IDs to collect metrics from. If a node is excluded, no metrics will +# ## be collected for its containers or apps. +# # node_include = [] +# # node_exclude = [] +# ## Container IDs to collect container metrics from. +# # container_include = [] +# # container_exclude = [] +# ## Container IDs to collect app metrics from. +# # app_include = [] +# # app_exclude = [] +# +# ## Maximum concurrent connections to the cluster. +# # max_connections = 10 +# ## Maximum time to receive a response from cluster. 
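+# ## e.g. (illustrative) a large or busy cluster may need a higher value such as "60s"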
+# # response_timeout = "20s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# ## Recommended filtering to reduce series cardinality. +# # [inputs.dcos.tagdrop] +# # path = ["/var/lib/mesos/slave/slaves/*"] + + +# # Read metrics from one or many disque servers +# [[inputs.disque]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port and password. +# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost"] + + +# # Provide a native collection for dmsetup based statistics for dm-cache +# # This plugin ONLY supports Linux +# [[inputs.dmcache]] +# ## Whether to report per-device stats or not +# per_device = true + + +# # Query given DNS server and gives statistics +# [[inputs.dns_query]] +# ## servers to query +# servers = ["8.8.8.8"] +# +# ## Network is the network protocol name. +# # network = "udp" +# +# ## Domains or subdomains to query. +# # domains = ["."] +# +# ## Query record type. +# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. +# # record_type = "A" +# +# ## Dns server port. +# # port = 53 +# +# ## Query timeout +# # timeout = "2s" +# +# ## Include the specified additional properties in the resulting metric. +# ## The following values are supported: +# ## "first_ip" -- return IP of the first A and AAAA answer +# ## "all_ips" -- return IPs of all A and AAAA answers +# # include_fields = [] + + +# # Read metrics about docker containers +# [[inputs.docker]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/docker.sock" +# +# ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# ## Note: configure this in one of the manager nodes in a Swarm cluster. +# ## configuring in multiple Swarm managers results in duplication of metrics. +# gather_services = false +# +# ## Only collect metrics for these containers. Values will be appended to +# ## container_name_include. +# ## Deprecated (1.4.0), use container_name_include +# container_names = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Containers to include and exclude. Collect all if empty. Globs accepted. +# container_name_include = [] +# container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## Timeout for docker list, info, and stats commands +# timeout = "5s" +# +# ## Whether to report for each container per-device blkio (8:0, 8:1...), +# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. 
+# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting +# ## is honored. +# perdevice = true +# +# ## Specifies for which classes a per-device metric should be issued +# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) +# ## Please note that this setting has no effect if 'perdevice' is set to 'true' +# # perdevice_include = ["cpu"] +# +# ## Whether to report for each container total blkio and network stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. +# ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting +# ## is honored. +# total = false +# +# ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. +# ## Possible values are 'cpu', 'blkio' and 'network' +# ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. +# ## Please note that this setting has no effect if 'total' is set to 'false' +# # total_include = ["cpu", "blkio", "network"] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# docker_label_include = [] +# docker_label_exclude = [] +# +# ## Which environment variables should we use as a tag +# tag_env = ["JAVA_HOME", "HEAP_SIZE"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics about dovecot servers +# [[inputs.dovecot]] +# ## specify dovecot servers via an address:port list +# ## e.g. +# ## localhost:24242 +# ## or as an UDS socket +# ## e.g. +# ## /var/run/dovecot/old-stats +# ## +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost:24242"] +# +# ## Type is one of "user", "domain", "ip", or "global" +# type = "global" +# +# ## Wildcard matches like "*.com". An empty string "" is same as "*" +# ## If type = "ip" filters should be +# filters = [""] + + +# # Reads metrics from DPDK applications using v2 telemetry interface. +# # This plugin ONLY supports Linux +# [[inputs.dpdk]] +# ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK +# ## telemetry interface. +# # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" +# +# ## Duration that defines how long the connected socket client will wait for +# ## a response before terminating connection. +# ## This includes both writing to and reading from socket. Since it's local +# ## socket access to a fast packet processing application, the timeout should +# ## be sufficient for most users. +# ## Setting the value to 0 disables the timeout (not recommended) +# # socket_access_timeout = "200ms" +# +# ## Enables telemetry data collection for selected device types. +# ## Adding "ethdev" enables collection of telemetry from DPDK NICs +# ## (stats, xstats, link_status). +# ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices +# ## (xstats). +# # device_types = ["ethdev"] +# +# ## List of custom, application-specific telemetry commands to query +# ## The list of available commands depend on the application deployed. 
+# ## Applications can register their own commands via telemetry library API +# ## http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands +# ## For L3 Forwarding with Power Management Sample Application this could be: +# ## additional_commands = ["/l3fwd-power/stats"] +# # additional_commands = [] +# +# ## Allows turning off collecting data for individual "ethdev" commands. +# ## Remove "/ethdev/link_status" from list to gather link status metrics. +# [inputs.dpdk.ethdev] +# exclude_commands = ["/ethdev/link_status"] +# +# ## When running multiple instances of the plugin it's recommended to add a +# ## unique tag to each instance to identify metrics exposed by an instance +# ## of DPDK application. This is useful when multiple DPDK apps run on a +# ## single host. +# ## [inputs.dpdk.tags] +# ## dpdk_instance = "my-fwd-app" + + +# # Read metrics about ECS containers +# [[inputs.ecs]] +# ## ECS metadata url. +# ## Metadata v2 API is used if set explicitly. Otherwise, +# ## v3 metadata endpoint API is used if available. +# # endpoint_url = "" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "RUNNING" state will be captured. +# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", +# ## "RESOURCES_PROVISIONED", "STOPPED". +# # container_status_include = [] +# # container_status_exclude = [] +# +# ## ecs labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# ecs_label_include = [ "com.amazonaws.ecs.*" ] +# ecs_label_exclude = [] +# +# ## Timeout for queries. +# # timeout = "5s" + + +# # Read stats from one or more Elasticsearch servers or clusters +# [[inputs.elasticsearch]] +# ## specify a list of one or more Elasticsearch servers +# ## you can add username and password to your url to use basic authentication: +# ## servers = ["http://user:pass@localhost:9200"] +# servers = ["http://localhost:9200"] +# +# ## Timeout for HTTP requests to the elastic search server(s) +# http_timeout = "5s" +# +# ## When local is true (the default), the node will read only its own stats. +# ## Set local to false when you want to read the node stats from all nodes +# ## of the cluster. +# local = true +# +# ## Set cluster_health to true when you want to obtain cluster health stats +# cluster_health = false +# +# ## Adjust cluster_health_level when you want to obtain detailed health stats +# ## The options are +# ## - indices (default) +# ## - cluster +# # cluster_health_level = "indices" +# +# ## Set cluster_stats to true when you want to obtain cluster stats. +# cluster_stats = false +# +# ## Only gather cluster_stats from the master node. +# ## To work this require local = true +# cluster_stats_only_from_master = true +# +# ## Indices to collect; can be one or more indices names or _all +# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index +# ## names that end with a changing value, like a date. +# indices_include = ["_all"] +# +# ## One of "shards", "cluster", "indices" +# ## Currently only "shards" is implemented +# indices_level = "shards" +# +# ## node_stats is a list of sub-stats that you want to have gathered. +# ## Valid options are "indices", "os", "process", "jvm", "thread_pool", +# ## "fs", "transport", "http", "breaker". 
Per default, all stats are gathered. +# # node_stats = ["jvm", "http"] +# +# ## HTTP Basic Authentication username and password. +# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Sets the number of most recent indices to return for indices that are +# ## configured with a date-stamped suffix. Each 'indices_include' entry +# ## ending with a wildcard (*) or glob matching pattern will group together +# ## all indices that match it, and sort them by the date or number after +# ## the wildcard. Metrics then are gathered for only the +# ## 'num_most_recent_indices' amount of most recent indices. +# # num_most_recent_indices = 0 + + +# # Derive metrics from aggregating Elasticsearch query results +# [[inputs.elasticsearch_query]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval. +# urls = [ "http://node1.es.example.com:9200" ] # required. +# +# ## Elasticsearch client timeout, defaults to "5s". +# # timeout = "5s" +# +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option +# # enable_sniffer = false +# +# ## Set the interval to check if the Elasticsearch nodes are available +# ## This option is only used if enable_sniffer is also set (0s to disable it) +# # health_check_interval = "10s" +# +# ## HTTP basic authentication details (eg. when using x-pack) +# # username = "telegraf" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# [[inputs.elasticsearch_query.aggregation]] +# ## measurement name for the results of the aggregation query +# measurement_name = "measurement" +# +# ## Elasticsearch indexes to query (accept wildcards). +# index = "index-*" +# +# ## The date/time field in the Elasticsearch index (mandatory). +# date_field = "@timestamp" +# +# ## If the field used for the date/time field in Elasticsearch is also using +# ## a custom date/time format it may be required to provide the format to +# ## correctly parse the field. +# ## +# ## If using one of the built in elasticsearch formats this is not required. +# # date_field_custom_format = "" +# +# ## Time window to query (eg. "1m" to query documents from last minute). +# ## Normally should be set to same as collection interval +# query_period = "1m" +# +# ## Lucene query to filter results +# # filter_query = "*" +# +# ## Fields to aggregate values (must be numeric fields) +# # metric_fields = ["metric"] +# +# ## Aggregation function to use on the metric fields +# ## Must be set if 'metric_fields' is set +# ## Valid values are: avg, sum, min, max, sum +# # metric_function = "avg" +# +# ## Fields to be used as tags +# ## Must be text, non-analyzed fields. 
Metric aggregations are performed +# ## per tag +# # tags = ["field.keyword", "field2.keyword"] +# +# ## Set to true to not ignore documents when the tag(s) above are missing +# # include_missing_tag = false +# +# ## String value of the tag when the tag does not exist +# ## Used when include_missing_tag is true +# # missing_tag_value = "null" + + +# # Returns ethtool statistics for given interfaces +# # This plugin ONLY supports Linux +# [[inputs.ethtool]] +# ## List of interfaces to pull metrics for +# # interface_include = ["eth0"] +# +# ## List of interfaces to ignore when pulling metrics. +# # interface_exclude = ["eth1"] +# +# ## Plugin behavior for downed interfaces +# ## Available choices: +# ## - expose: collect & report metrics for down interfaces +# ## - skip: ignore interfaces that are marked down +# # down_interfaces = "expose" +# +# ## Reading statistics from interfaces in additional namespaces is also +# ## supported, so long as the namespaces are named (have a symlink in +# ## /var/run/netns). The telegraf process will also need the CAP_SYS_ADMIN +# ## permission. +# ## By default, only the current namespace will be used. For additional +# ## namespace support, at least one of `namespace_include` and +# ## `namespace_exclude` must be provided. +# ## To include all namespaces, set `namespace_include` to `["*"]`. +# ## The initial namespace (if anonymous) can be specified with the empty +# ## string (""). +# +# ## List of namespaces to pull metrics for +# # namespace_include = [] +# +# ## List of namespace to ignore when pulling metrics. +# # namespace_exclude = [] +# +# ## Some drivers declare statistics with extra whitespace, different spacing, +# ## and mix cases. This list, when enabled, can be used to clean the keys. +# ## Here are the current possible normalizations: +# ## * snakecase: converts fooBarBaz to foo_bar_baz +# ## * trim: removes leading and trailing whitespace +# ## * lower: changes all capitalized letters to lowercase +# ## * underscore: replaces spaces with underscores +# # normalize_keys = ["snakecase", "trim", "lower", "underscore"] + + +# # Read metrics from one or more commands that can output to stdout +# [[inputs.exec]] +# ## Commands array +# commands = [ +# "/tmp/test.sh", +# "/usr/bin/mycollector --foo=bar", +# "/tmp/collect_*.sh" +# ] +# +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# +# ## Timeout for each command to complete. +# timeout = "5s" +# +# ## measurement name suffix (for separating different commands) +# name_suffix = "_mycollector" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from fail2ban. +# [[inputs.fail2ban]] +# ## Use sudo to run fail2ban-client +# # use_sudo = false +# +# ## Use the given socket instead of the default one +# # socket = "/var/run/fail2ban/fail2ban.sock" + + +# # Read devices value(s) from a Fibaro controller +# [[inputs.fibaro]] +# ## Required Fibaro controller address/hostname. 
+# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available +# url = "http://:80" +# +# ## Required credentials to access the API (http://) +# username = "" +# password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Fibaro Device Type +# ## By default, this plugin will attempt to read using the HC2 API. For HC3 +# ## devices, set this to "HC3" +# # device_type = "HC2" + + +# # Parse a complete file each interval +# [[inputs.file]] +# ## Files to parse each interval. Accept standard unix glob matching rules, +# ## as well as ** to match recursive files and directories. +# files = ["/tmp/metrics.out"] +# +# ## Character encoding to use when interpreting the file contents. Invalid +# ## characters are replaced using the unicode replacement character. When set +# ## to the empty string the data is not decoded to text. +# ## ex: character_encoding = "utf-8" +# ## character_encoding = "utf-16le" +# ## character_encoding = "utf-16be" +# ## character_encoding = "" +# # character_encoding = "" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. Cautious when file name variation is high, this can increase the cardinality +# ## significantly. Read more about cardinality here: +# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality +# # file_tag = "" + + +# # Count files in a directory +# [[inputs.filecount]] +# ## Directories to gather stats about. +# ## This accept standard unit glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/** -> recursively find all directories in /var/log and count files in each directories +# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories +# ## /var/log -> count all files in /var/log and all of its subdirectories +# directories = ["/var/cache/apt", "/tmp"] +# +# ## Only count files that match the name pattern. Defaults to "*". +# name = "*" +# +# ## Count files in subdirectories. Defaults to true. +# recursive = true +# +# ## Only count regular files. Defaults to true. +# regular_only = true +# +# ## Follow all symlinks while walking the directory tree. Defaults to false. +# follow_symlinks = false +# +# ## Only count files that are at least this size. If size is +# ## a negative number, only count files that are smaller than the +# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... +# ## Without quotes and units, interpreted as size in bytes. +# size = "0B" +# +# ## Only count files that have not been touched for at least this +# ## duration. If mtime is negative, only count files that have been +# ## touched in this duration. Defaults to "0s". +# mtime = "0s" + + +# # Read stats about given file(s) +# [[inputs.filestat]] +# ## Files to gather stats about. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". See https://github.com/gobwas/glob. +# files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"] +# +# ## If true, read the entire file and calculate an md5 checksum. 
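+# ## Note (illustrative): computing the md5 reads every matched file in full
+# ## each interval, so enable this with care for large files.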
+# md5 = false + + +# # Read real time temps from fireboard.io servers +# [[inputs.fireboard]] +# ## Specify auth token for your account +# auth_token = "invalidAuthToken" +# ## You can override the fireboard server URL if necessary +# # url = https://fireboard.io/api/v1/devices.json +# ## You can set a different http_timeout if you need to +# ## You should set a string using an number and time indicator +# ## for example "12s" for 12 seconds. +# # http_timeout = "4s" + + +# # Read metrics exposed by fluentd in_monitor plugin +# [[inputs.fluentd]] +# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint). +# ## +# ## Endpoint: +# ## - only one URI is allowed +# ## - https is not supported +# endpoint = "http://localhost:24220/api/plugins.json" +# +# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) +# exclude = [ +# "monitor_agent", +# "dummy", +# ] + + +# # Gather repository information from GitHub hosted repositories. +# [[inputs.github]] +# ## List of repositories to monitor +# repositories = [ +# "influxdata/telegraf", +# "influxdata/influxdb" +# ] +# +# ## Github API access token. Unauthenticated requests are limited to 60 per hour. +# # access_token = "" +# +# ## Github API enterprise url. Github Enterprise accounts must specify their base url. +# # enterprise_base_url = "" +# +# ## Timeout for HTTP requests. +# # http_timeout = "5s" +# +# ## List of additional fields to query. +# ## NOTE: Getting those fields might involve issuing additional API-calls, so please +# ## make sure you do not exceed the rate-limit of GitHub. +# ## +# ## Available fields are: +# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) +# # additional_fields = [] + + +# # Gather metrics by iterating the files located on a Cloud Storage Bucket. +# [[inputs.google_cloud_storage]] +# ## Required. Name of Cloud Storage bucket to ingest metrics from. +# bucket = "my-bucket" +# +# ## Optional. Prefix of Cloud Storage bucket keys to list metrics from. +# # key_prefix = "my-bucket" +# +# ## Key that will store the offsets in order to pick up where the ingestion was left. +# offset_key = "offset_key" +# +# ## Key that will store the offsets in order to pick up where the ingestion was left. +# objects_per_iteration = 10 +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## Google Cloud Storage APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" + + +# # Read flattened metrics from one or more GrayLog HTTP endpoints +# [[inputs.graylog]] +# ## API endpoint, currently supported API: +# ## +# ## - multiple (e.g. http://:9000/api/system/metrics/multiple) +# ## - namespace (e.g. http://:9000/api/system/metrics/namespace/{namespace}) +# ## +# ## For namespace endpoint, the metrics array will be ignored for that call. +# ## Endpoint can contain namespace and multiple type calls. 
+# ## +# ## Please check http://[graylog-server-ip]:9000/api/api-browser for full list +# ## of endpoints +# servers = [ +# "http://[graylog-server-ip]:9000/api/system/metrics/multiple", +# ] +# +# ## Set timeout (default 5 seconds) +# # timeout = "5s" +# +# ## Metrics list +# ## List of metrics can be found on Graylog webservice documentation. +# ## Or by hitting the web service api at: +# ## http://[graylog-host]:9000/api/system/metrics +# metrics = [ +# "jvm.cl.loaded", +# "jvm.memory.pools.Metaspace.committed" +# ] +# +# ## Username and password +# username = "" +# password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics of HAProxy, via stats socket or http endpoints +# [[inputs.haproxy]] +# ## List of stats endpoints. Metrics can be collected from both http and socket +# ## endpoints. Examples of valid endpoints: +# ## - http://myhaproxy.com:1936/haproxy?stats +# ## - https://myhaproxy.com:8000/stats +# ## - socket:/run/haproxy/admin.sock +# ## - /run/haproxy/*.sock +# ## - tcp://127.0.0.1:1936 +# ## +# ## Server addresses not starting with 'http://', 'https://', 'tcp://' will be +# ## treated as possible sockets. When specifying local socket, glob patterns are +# ## supported. +# servers = ["http://myhaproxy.com:1936/haproxy?stats"] +# +# ## By default, some of the fields are renamed from what haproxy calls them. +# ## Setting this option to true results in the plugin keeping the original +# ## field names. +# # keep_field_names = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Monitor disks' temperatures using hddtemp +# [[inputs.hddtemp]] +# ## By default, telegraf gathers temps data from all disks detected by the +# ## hddtemp. +# ## +# ## Only collect temps from the selected disks. +# ## +# ## A * as the device name will return the temperature values of all disks. +# ## +# # address = "127.0.0.1:7634" +# # devices = ["sda", "*"] + + +# # Read formatted metrics from one or more HTTP endpoints +# [[inputs.http]] +# ## One or more URLs from which to read formatted metrics +# urls = [ +# "http://localhost/metrics" +# ] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## HTTP entity-body to send with POST/PUT requests. +# # body = "" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Optional Bearer token settings to use for the API calls. +# ## Use either the token itself or the token file if you need a token. +# # token = "eyJhbGc...Qssw5c" +# # token_file = "/path/to/file" +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" +# +# ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. 
+# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] +# +# ## HTTP Proxy support +# # use_system_proxy = false +# # http_proxy_url = "" +# +# ## Optional TLS Config +# ## Set to true/false to enforce TLS being enabled/disabled. If not set, +# ## enable TLS only if any of the other options are specified. +# # tls_enable = +# ## Trusted root certificates for server +# # tls_ca = "/path/to/cafile" +# ## Used for TLS client certificate authentication +# # tls_cert = "/path/to/certfile" +# ## Used for TLS client certificate authentication +# # tls_key = "/path/to/keyfile" +# ## Send the specified TLS server name via SNI +# # tls_server_name = "kubernetes.example.com" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_headers = { Content-Type = "application/json", X-MY-HEADER = "hello" } +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## List of success status codes +# # success_status_codes = [200] +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" +# + + +# # HTTP/HTTPS request given an address a method and a timeout +# [[inputs.http_response]] +# ## List of urls to query. +# # urls = ["http://localhost"] +# +# ## Set http_proxy. +# ## Telegraf uses the system wide proxy settings if it's is not set. +# # http_proxy = "http://localhost:8888" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## HTTP Request Method +# # method = "GET" +# +# ## Whether to follow redirects from the server (defaults to false) +# # follow_redirects = false +# +# ## Optional file with Bearer token +# ## file content is added as an Authorization header +# # bearer_token = "/path/to/file" +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional HTTP Request Body +# # body = ''' +# # {'fake':'data'} +# # ''' +# +# ## Optional name of the field that will contain the body of the response. +# ## By default it is set to an empty String indicating that the body's +# ## content won't be added +# # response_body_field = '' +# +# ## Maximum allowed HTTP response body size in bytes. +# ## 0 means to use the default of 32MiB. +# ## If the response body size exceeds this limit a "body_read_error" will +# ## be raised. +# # response_body_max_size = "32MiB" +# +# ## Optional substring or regex match in body of the response (case sensitive) +# # response_string_match = "\"service_status\": \"up\"" +# # response_string_match = "ok" +# # response_string_match = "\".*_status\".?:.?\"up\"" +# +# ## Expected response status code. +# ## The status code of the response is compared to this value. If they match, +# ## the field "response_status_code_match" will be 1, otherwise it will be 0. 
+# ## If the expected status code is 0, the check is disabled and the field +# ## won't be added. +# # response_status_code = 0 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# ## Use the given name as the SNI server name on each URL +# # tls_server_name = "" +# ## TLS renegotiation method, choose from "never", "once", "freely" +# # tls_renegotiation_method = "never" +# +# ## HTTP Request Headers (all values must be strings) +# # [inputs.http_response.headers] +# # Host = "github.com" +# +# ## Optional setting to map response http headers into tags +# ## If the http header is not present on the request, no corresponding tag will +# ## be added. If multiple instances of the http header are present, only the +# ## first value will be used. +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Interface to use when dialing an address +# # interface = "eth0" + + +# ## DEPRECATED: The "httpjson" plugin is deprecated in version 1.6.0 and will be removed in 1.30.0, use 'inputs.http' instead. +# # Read flattened metrics from one or more JSON HTTP endpoints +# [[inputs.httpjson]] +# ## NOTE This plugin only reads numerical measurements, strings and booleans +# ## will be ignored. +# +# ## Name for the service being polled. Will be appended to the name of the +# ## measurement e.g. "httpjson_webserver_stats". +# ## +# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. +# name = "webserver_stats" +# +# ## URL of each server in the service's cluster +# servers = [ +# "http://localhost:9999/stats/", +# "http://localhost:9998/stats/", +# ] +# ## Set response_timeout (default 5 seconds) +# response_timeout = "5s" +# +# ## HTTP method to use: GET or POST (case-sensitive) +# method = "GET" +# +# ## Tags to extract from top-level of JSON server response. +# # tag_keys = [ +# # "my_tag_1", +# # "my_tag_2" +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP Request Parameters (all values must be strings). For "GET" requests, data +# ## will be included in the query. For "POST" requests, data will be included +# ## in the request body as "x-www-form-urlencoded". +# # [inputs.httpjson.parameters] +# # event_type = "cpu_spike" +# # threshold = "0.75" +# +# ## HTTP Request Headers (all values must be strings). +# # [inputs.httpjson.headers] +# # X-Auth-Token = "my-xauth-token" +# # apiVersion = "v1" + + +# # Gathers huge pages measurements. +# # This plugin ONLY supports Linux +# [[inputs.hugepages]] +# ## Supported huge page types: +# ## - "root" - based on root huge page control directory: +# ## /sys/kernel/mm/hugepages +# ## - "per_node" - based on per NUMA node directories: +# ## /sys/devices/system/node/node[0-9]*/hugepages +# ## - "meminfo" - based on /proc/meminfo file +# # types = ["root", "per_node"] + + +# # Gather Icinga2 status +# [[inputs.icinga2]] +# ## Required Icinga2 server address +# # server = "https://localhost:5665" +# +# ## Collected Icinga2 objects ("services", "hosts") +# ## Specify at least one object to collect from /v1/objects endpoint. 
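+# ## e.g. (illustrative) objects = ["services", "hosts"] to gather both object types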
+# # objects = ["services"] +# +# ## Collect metrics from /v1/status endpoint +# ## Choose from: +# ## "ApiListener", "CIB", "IdoMysqlConnection", "IdoPgsqlConnection" +# # status = [] +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Gets counters from all InfiniBand cards and ports installed +# # This plugin ONLY supports Linux +# [[inputs.infiniband]] +# # no configuration + + +# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.influxdb]] +# ## Works with InfluxDB debug endpoints out of the box, +# ## but other services can use this format too. +# ## See the influxdb plugin's README for more details. +# +# ## Multiple URLs from which to read InfluxDB-formatted JSON +# ## Default is "http://localhost:8086/debug/vars". +# urls = [ +# "http://localhost:8086/debug/vars" +# ] +# +# ## Username and password to send using HTTP Basic Authentication. +# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## http request & header timeout +# timeout = "5s" + + +# # Intel Baseband Accelerator Input Plugin collects metrics from both dedicated and integrated +# # Intel devices that provide Wireless Baseband hardware acceleration. +# # This plugin ONLY supports Linux. +# [[inputs.intel_baseband]] +# ## Path to socket exposed by pf-bb-config for CLI interaction (mandatory). +# ## In version v23.03 of pf-bb-config the path is created according to the schema: +# ## "/tmp/pf_bb_config.0000\:\:..sock" where 0000\:\:. is the PCI device ID. +# socket_path = "" +# +# ## Path to log file exposed by pf-bb-config with telemetry to read (mandatory). +# ## In version v23.03 of pf-bb-config the path is created according to the schema: +# ## "/var/log/pf_bb_cfg_0000\:\:..log" where 0000\:\:. is the PCI device ID. +# log_file_path = "" +# +# ## Specifies plugin behavior regarding unreachable socket (which might not have been initialized yet). +# ## Available choices: +# ## - error: Telegraf will return an error on startup if socket is unreachable +# ## - ignore: Telegraf will ignore error regarding unreachable socket on both startup and gather +# # unreachable_socket_behavior = "error" +# +# ## Duration that defines how long the connected socket client will wait for +# ## a response before terminating connection. +# ## Since it's local socket access to a fast packet processing application, the timeout should +# ## be sufficient for most users. +# ## Setting the value to 0 disables the timeout (not recommended). +# # socket_access_timeout = "1s" +# +# ## Duration that defines maximum time plugin will wait for pf-bb-config to write telemetry to the log file. +# ## Timeout may differ depending on the environment. +# ## Must be equal or larger than 50ms. +# # wait_for_telemetry_timeout = "1s" + + +# ## Reads metrics from DPDK using v2 telemetry interface. +# ## This plugin ONLY supports Linux +# [[inputs.intel_dlb]] +# ## Path to DPDK telemetry socket. 
+# # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" +# +# ## Default eventdev command list, it gathers metrics from socket by given commands. +# ## Supported options: +# ## "/eventdev/dev_xstats", "/eventdev/port_xstats", +# ## "/eventdev/queue_xstats", "/eventdev/queue_links" +# # eventdev_commands = ["/eventdev/dev_xstats", "/eventdev/port_xstats", "/eventdev/queue_xstats", "/eventdev/queue_links"] +# +# ## Detect DLB devices based on device id. +# ## Currently, only supported and tested device id is `0x2710`. +# ## Configuration added to support forward compatibility. +# # dlb_device_types = ["0x2710"] +# +# ## Specifies plugin behavior regarding unreachable socket (which might not have been initialized yet). +# ## Available choices: +# ## - error: Telegraf will return an error on startup if socket is unreachable +# ## - ignore: Telegraf will ignore error regarding unreachable socket on both startup and gather +# # unreachable_socket_behavior = "error" + + +# # Intel Platform Monitoring Technology plugin exposes Intel PMT metrics available through the Intel PMT kernel space. +# # This plugin ONLY supports Linux. +# [[inputs.intel_pmt]] +# ## Filepath to PMT XML within local copies of XML files from PMT repository. +# ## The filepath should be absolute. +# spec = "/home/telegraf/Intel-PMT/xml/pmt.xml" +# +# ## Enable metrics by their datatype. +# ## See the Enabling Metrics section in README for more details. +# ## If empty, all metrics are enabled. +# ## When used, the alternative option samples_enabled should NOT be used. +# # datatypes_enabled = [] +# +# ## Enable metrics by their name. +# ## See the Enabling Metrics section in README for more details. +# ## If empty, all metrics are enabled. +# ## When used, the alternative option datatypes_enabled should NOT be used. +# # samples_enabled = [] + + +# # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) +# # and per-CPU metrics like temperature, power and utilization. +# # This plugin ONLY supports Linux +# [[inputs.intel_powerstat]] +# ## The user can choose which package metrics are monitored by the plugin with +# ## the package_metrics setting: +# ## - The default, will collect "current_power_consumption", +# ## "current_dram_power_consumption" and "thermal_design_power" +# ## - Leaving this setting empty means no package metrics will be collected +# ## - Finally, a user can specify individual metrics to capture from the +# ## supported options list +# ## Supported options: +# ## "current_power_consumption", "current_dram_power_consumption", +# ## "thermal_design_power", "max_turbo_frequency", "uncore_frequency", +# ## "cpu_base_frequency" +# # package_metrics = ["current_power_consumption", "current_dram_power_consumption", "thermal_design_power"] +# +# ## The user can choose which per-CPU metrics are monitored by the plugin in +# ## cpu_metrics array. +# ## Empty or missing array means no per-CPU specific metrics will be collected +# ## by the plugin. +# ## Supported options: +# ## "cpu_frequency", "cpu_c0_state_residency", "cpu_c1_state_residency", +# ## "cpu_c6_state_residency", "cpu_busy_cycles", "cpu_temperature", +# ## "cpu_busy_frequency" +# ## ATTENTION: cpu_busy_cycles is DEPRECATED - use cpu_c0_state_residency +# # cpu_metrics = [] + + +# # Collect statistics about itself +# [[inputs.internal]] +# ## If true, collect telegraf memory stats. +# # collect_memstats = true +# +# ## If true, collect metrics from Go's runtime.metrics. 
For a full list see: +# ## https://pkg.go.dev/runtime/metrics +# # collect_gostats = false + + +# # Monitors internet speed using speedtest.net service +# [[inputs.internet_speed]] +# ## This plugin downloads many MB of data each time it is run. As such +# ## consider setting a higher interval for this plugin to reduce the +# ## demand on your internet connection. +# # interval = "60m" +# +# ## Enable to reduce memory usage +# # memory_saving_mode = false +# +# ## Caches the closest server location +# # cache = false +# +# ## Number of concurrent connections +# ## By default or set to zero, the number of CPU cores is used. Use this to +# ## reduce the impact on system performance or to increase the connections on +# ## faster connections to ensure the fastest speed. +# # connections = 0 +# +# ## Test mode +# ## By default, a single sever is used for testing. This may work for most, +# ## however, setting to "multi" will reach out to multiple servers in an +# ## attempt to get closer to ideal internet speeds. +# # test_mode = "single" +# +# ## Server ID exclude filter +# ## Allows the user to exclude or include specific server IDs received by +# ## speedtest-go. Values in the exclude option will be skipped over. Values in +# ## the include option are the only options that will be picked from. +# ## +# ## See the list of servers speedtest-go will return at: +# ## https://www.speedtest.net/api/js/servers?engine=js&limit=10 +# ## +# # server_id_exclude = [] +# # server_id_include = [] + + +# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. +# [[inputs.interrupts]] +# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is +# ## stored as a field. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# # cpu_as_tag = false +# +# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. +# # [inputs.interrupts.tagdrop] +# # irq = [ "NET_RX", "TASKLET" ] + + +# # Read metrics from the bare metal servers via IPMI +# [[inputs.ipmi_sensor]] +# ## optionally specify the path to the ipmitool executable +# # path = "/usr/bin/ipmitool" +# ## +# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. +# ## Sudo must be configured to allow the telegraf user to run ipmitool +# ## without a password. +# # use_sudo = false +# ## +# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR +# # privilege = "ADMINISTRATOR" +# ## +# ## optionally specify one or more servers via a url matching +# ## [username[:password]@][protocol[(address)]] +# ## e.g. +# ## root:passwd@lan(127.0.0.1) +# ## +# ## if no servers are specified, local machine sensor stats will be queried +# ## +# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] +# +# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid +# ## gaps or overlap in pulled data +# interval = "30s" +# +# ## Timeout for the ipmitool command to complete. Default is 20 seconds. +# timeout = "20s" +# +# ## Schema Version: (Optional, defaults to version 1) +# metric_version = 2 +# +# ## Optionally provide the hex key for the IMPI connection. 
+# # hex_key = "" +# +# ## If ipmitool should use a cache +# ## for me ipmitool runs about 2 to 10 times faster with cache enabled on HP G10 servers (when using ubuntu20.04) +# ## the cache file may not work well for you if some sensors come up late +# # use_cache = false +# +# ## Path to the ipmitools cache file (defaults to OS temp dir) +# ## The provided path must exist and must be writable +# # cache_path = "" + + +# # Gather packets and bytes counters from Linux ipsets +# [[inputs.ipset]] +# ## By default, we only show sets which have already matched at least 1 packet. +# ## set include_unmatched_sets = true to gather them all. +# include_unmatched_sets = false +# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") +# ## You can avoid using sudo or root, by setting appropriate privileges for +# ## the telegraf.service systemd service. +# use_sudo = false +# ## The default timeout of 1s for ipset execution can be overridden here: +# # timeout = "1s" +# + + +# # Gather packets and bytes throughput from iptables +# # This plugin ONLY supports Linux +# [[inputs.iptables]] +# ## iptables require root access on most systems. +# ## Setting 'use_sudo' to true will make use of sudo to run iptables. +# ## Users must configure sudo to allow telegraf user to run iptables with +# ## no password. +# ## iptables can be restricted to only list command "iptables -nvL". +# use_sudo = false +# ## Setting 'use_lock' to true runs iptables with the "-w" option. +# ## Adjust your sudo settings appropriately if using this option +# ## ("iptables -w 5 -nvl") +# use_lock = false +# ## Define an alternate executable, such as "ip6tables". Default is "iptables". +# # binary = "ip6tables" +# ## defines the table to monitor: +# table = "filter" +# ## defines the chains to monitor. +# ## NOTE: iptables rules without a comment will not be monitored. +# ## Read the plugin documentation for more information. +# chains = [ "INPUT" ] + + +# # Collect virtual and real server stats from Linux IPVS +# # This plugin ONLY supports Linux +# [[inputs.ipvs]] +# # no configuration + + +# # Read jobs and cluster metrics from Jenkins instances +# [[inputs.jenkins]] +# ## The Jenkins URL in the format "schema://host:port" +# url = "http://my-jenkins-instance:8080" +# # username = "admin" +# # password = "admin" +# +# ## Set response_timeout +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Max Job Build Age filter +# ## Default 1 hour, ignore builds older than max_build_age +# # max_build_age = "1h" +# +# ## Optional Sub Job Depth filter +# ## Jenkins can have unlimited layer of sub jobs +# ## This config will limit the layers of pulling, default value 0 means +# ## unlimited pulling until no more sub jobs +# # max_subjob_depth = 0 +# +# ## Optional Sub Job Per Layer +# ## In workflow-multibranch-plugin, each branch will be created as a sub job. +# ## This config will limit to call only the lasted branches in each layer, +# ## empty will use default value 10 +# # max_subjob_per_layer = 10 +# +# ## Jobs to include or exclude from gathering +# ## When using both lists, job_exclude has priority. 
+# ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"]
+# # job_include = [ "*" ]
+# # job_exclude = [ ]
+#
+# ## Nodes to include or exclude from gathering
+# ## When using both lists, node_exclude has priority.
+# # node_include = [ "*" ]
+# # node_exclude = [ ]
+#
+# ## Worker pool for jenkins plugin only
+# ## Leaving this field empty will use the default value 5
+# # max_connections = 5
+#
+# ## When set to true, node labels are added as a comma-separated tag. If none
+# ## are found, then a tag with the value of 'none' is used. Finally, if a
+# ## label contains a comma it is replaced with an underscore.
+# # node_labels_as_tag = false
+
+
+# ## DEPRECATED: The "jolokia" plugin is deprecated in version 1.5.0 and will be removed in 1.30.0, use 'inputs.jolokia2' instead.
+# # Read JMX metrics through Jolokia
+# [[inputs.jolokia]]
+# ## This is the context root used to compose the jolokia url
+# ## NOTE that Jolokia requires a trailing slash at the end of the context root
+# context = "/jolokia/"
+#
+# ## This specifies the mode used
+# # mode = "proxy"
+# #
+# ## When in proxy mode this section is used to specify further
+# ## proxy address configurations.
+# ## Remember to change host address to fit your environment.
+# # [inputs.jolokia.proxy]
+# # host = "127.0.0.1"
+# # port = "8080"
+#
+# ## Optional http timeouts
+# ##
+# ## response_header_timeout, if non-zero, specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request.
+# # response_header_timeout = "3s"
+# ##
+# ## client_timeout specifies a time limit for requests made by this client.
+# ## Includes connection time, any redirects, and reading the response body.
+# # client_timeout = "4s"
+#
+# ## List of servers exposing jolokia read service
+# [[inputs.jolokia.servers]]
+# name = "as-server-01"
+# host = "127.0.0.1"
+# port = "8080"
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## List of metrics collected on above servers
+# ## Each metric consists of a name, a jmx path and either
+# ## a pass or drop slice attribute.
+# ## This collects all heap memory usage metrics.
+# [[inputs.jolokia.metrics]]
+# name = "heap_memory_usage"
+# mbean = "java.lang:type=Memory"
+# attribute = "HeapMemoryUsage"
+#
+# ## This collects thread count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "thread_count"
+# mbean = "java.lang:type=Threading"
+# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
+#
+# ## This collects class loaded/unloaded count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "class_count"
+# mbean = "java.lang:type=ClassLoading"
+# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
+
+
+# # Read JMX metrics from a Jolokia REST agent endpoint
+# [[inputs.jolokia2_agent]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# # Add agents URLs to query
+# urls = ["http://localhost:8080/jolokia"]
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional origin URL to include as a header in the request. Some endpoints
+# ## may reject an empty origin.
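+# ## e.g. (illustrative) origin = "http://localhost" for agents that reject an empty origin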
+# # origin = "" +# +# ## Optional TLS config +# # tls_ca = "/var/private/ca.pem" +# # tls_cert = "/var/private/client.pem" +# # tls_key = "/var/private/client-key.pem" +# # insecure_skip_verify = false +# +# ## Add metrics to read +# [[inputs.jolokia2_agent.metric]] +# name = "java_runtime" +# mbean = "java.lang:type=Runtime" +# paths = ["Uptime"] + + +# # Read JMX metrics from a Jolokia REST proxy endpoint +# [[inputs.jolokia2_proxy]] +# # default_tag_prefix = "" +# # default_field_prefix = "" +# # default_field_separator = "." +# +# ## Proxy agent +# url = "http://localhost:8080/jolokia" +# # username = "" +# # password = "" +# # response_timeout = "5s" +# +# ## Optional origin URL to include as a header in the request. Some endpoints +# ## may reject an empty origin. +# # origin = "" +# +# ## Optional TLS config +# # tls_ca = "/var/private/ca.pem" +# # tls_cert = "/var/private/client.pem" +# # tls_key = "/var/private/client-key.pem" +# # insecure_skip_verify = false +# +# ## Add proxy targets to query +# # default_target_username = "" +# # default_target_password = "" +# [[inputs.jolokia2_proxy.target]] +# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi" +# # username = "" +# # password = "" +# +# ## Add metrics to read +# [[inputs.jolokia2_proxy.metric]] +# name = "java_runtime" +# mbean = "java.lang:type=Runtime" +# paths = ["Uptime"] + + +# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.kapacitor]] +# ## Multiple URLs from which to read Kapacitor-formatted JSON +# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars". +# urls = [ +# "http://localhost:9092/kapacitor/v1/debug/vars" +# ] +# +# ## Time limit for http requests +# timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Get kernel statistics from /proc/vmstat +# # This plugin ONLY supports Linux +# [[inputs.kernel_vmstat]] +# # no configuration + + +# # Read status information from one or more Kibana servers +# [[inputs.kibana]] +# ## Specify a list of one or more Kibana servers +# servers = ["http://localhost:5601"] +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the Kubernetes api +# [[inputs.kube_inventory]] +# ## URL for the Kubernetes API. +# ## If empty in-cluster config with POD's service account token will be used. +# # url = "" +# +# ## Namespace to use. Set to "" to use all namespaces. +# # namespace = "default" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# ## +# ## Ignored if url is empty and in-cluster config is used. +# ## +# ## If both of these are empty, we'll use the default serviceaccount: +# ## at: /var/run/secrets/kubernetes.io/serviceaccount/token +# ## +# ## To auto-refresh the token, please use a file with the bearer_token option. +# ## If given a string, Telegraf cannot refresh the token periodically. 
+# # bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+# ## OR
+# ## deprecated in 1.24.0; use bearer_token with a file
+# # bearer_token_string = "abc_123"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional Resources to exclude from gathering
+# ## Leave them blank to gather everything available.
+# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress",
+# ## "nodes", "persistentvolumes", "persistentvolumeclaims", "pods", "services",
+# ## "statefulsets"
+# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## Optional Resources to include when gathering
+# ## Overrides resource_exclude if both set.
+# # resource_include = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## selectors to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all selectors as tags
+# ## selector_exclude overrides selector_include if both set.
+# # selector_include = []
+# # selector_exclude = ["*"]
+#
+# ## Optional TLS Config
+# ## Trusted root certificates for server
+# # tls_ca = "/path/to/cafile"
+# ## Used for TLS client certificate authentication
+# # tls_cert = "/path/to/certfile"
+# ## Used for TLS client certificate authentication
+# # tls_key = "/path/to/keyfile"
+# ## Send the specified TLS server name via SNI
+# # tls_server_name = "kubernetes.example.com"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Uncomment to remove deprecated metrics.
+# # fielddrop = ["terminated_reason"]
+
+
+# # Read metrics from the kubernetes kubelet api
+# [[inputs.kubernetes]]
+# ## URL for the kubelet, if empty read metrics from all nodes in the cluster
+# url = "http://127.0.0.1:10255"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /var/run/secrets/kubernetes.io/serviceaccount/token
+# ##
+# ## To re-read the token at each interval, please use a file with the
+# ## bearer_token option. If given a string, Telegraf will always use that
+# ## token.
+# # bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Pod labels to be added as tags. An empty array for both include and
+# ## exclude will include all labels.
+# # label_include = []
+# # label_exclude = ["*"]
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = /path/to/cafile
+# # tls_cert = /path/to/certfile
+# # tls_key = /path/to/keyfile
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from a LeoFS Server via SNMP
+# [[inputs.leofs]]
+# ## An array of URLs of the form:
+# ## host [ ":" port]
+# servers = ["127.0.0.1:4010"]
+
+
+# # The libvirt plugin collects statistics from virtualized guests using the libvirt virtualization API.
+# [[inputs.libvirt]]
+# ## Domain names from which libvirt gathers statistics.
+# ## By default (empty or missing array) the plugin gathers statistics from each domain registered in the host system.
+# # domains = []
+#
+# ## Libvirt connection URI with hypervisor.
+# ## The plugin supports multiple transport protocols and approaches which are configurable via the URI.
+# ## The general URI form: driver[+transport]://[username@][hostname][:port]/[path][?extraparameters] +# ## Supported transport protocols: ssh, tcp, tls, unix +# ## URI examples for each type of transport protocol: +# ## 1. SSH: qemu+ssh:///system?keyfile=/&known_hosts=/ +# ## 2. TCP: qemu+tcp:///system +# ## 3. TLS: qemu+tls:///system?pkipath=/certs_dir/ +# ## 4. UNIX: qemu+unix:///system?socket=/ +# ## Default URI is qemu:///system +# # libvirt_uri = "qemu:///system" +# +# ## Statistics groups for which libvirt plugin will gather statistics. +# ## Supported statistics groups: state, cpu_total, balloon, vcpu, interface, block, perf, iothread, memory, dirtyrate +# ## Empty array means no metrics for statistics groups will be exposed by the plugin. +# ## By default the plugin will gather all available statistics. +# # statistics_groups = ["state", "cpu_total", "balloon", "vcpu", "interface", "block", "perf", "iothread", "memory", "dirtyrate"] +# +# ## A list containing additional statistics to be exposed by libvirt plugin. +# ## Supported additional statistics: vcpu_mapping +# ## By default (empty or missing array) the plugin will not collect additional statistics. +# # additional_statistics = [] +# + + +# # Provides Linux CPU metrics +# # This plugin ONLY supports Linux +# [[inputs.linux_cpu]] +# ## Path for sysfs filesystem. +# ## See https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt +# ## Defaults: +# # host_sys = "/sys" +# +# ## CPU metrics collected by the plugin. +# ## Supported options: +# ## "cpufreq", "thermal" +# ## Defaults: +# # metrics = ["cpufreq"] + + +# # Provides Linux sysctl fs metrics +# [[inputs.linux_sysctl_fs]] +# # no configuration + + +# # Read metrics exposed by Logstash +# [[inputs.logstash]] +# ## The URL of the exposed Logstash API endpoint. +# url = "http://127.0.0.1:9600" +# +# ## Use Logstash 5 single pipeline API, set to true when monitoring +# ## Logstash 5. +# # single_pipeline = false +# +# ## Enable optional collection components. Can contain +# ## "pipelines", "process", and "jvm". +# # collect = ["pipelines", "process", "jvm"] +# +# ## Timeout for HTTP requests. +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials. +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Use TLS but skip chain & host verification. +# # insecure_skip_verify = false +# +# ## Optional HTTP headers. +# # [inputs.logstash.headers] +# # "X-Special-Header" = "Special-Value" + + +# # Read metrics from local Lustre service on OST, MDS +# # This plugin ONLY supports Linux +# [[inputs.lustre2]] +# ## An array of /proc globs to search for Lustre stats +# ## If not specified, the default will work on Lustre 2.5.x +# ## +# # ost_procfiles = [ +# # "/proc/fs/lustre/obdfilter/*/stats", +# # "/proc/fs/lustre/osd-ldiskfs/*/stats", +# # "/proc/fs/lustre/obdfilter/*/job_stats", +# # "/proc/fs/lustre/obdfilter/*/exports/*/stats", +# # ] +# # mds_procfiles = [ +# # "/proc/fs/lustre/mdt/*/md_stats", +# # "/proc/fs/lustre/mdt/*/job_stats", +# # "/proc/fs/lustre/mdt/*/exports/*/stats", +# # ] + + +# # Read metrics about LVM physical volumes, volume groups, logical volumes. 
+# [[inputs.lvm]] +# ## Use sudo to run LVM commands +# use_sudo = false +# +# ## The default location of the pvs binary can be overridden with: +# #pvs_binary = "/usr/sbin/pvs" +# +# ## The default location of the vgs binary can be overridden with: +# #vgs_binary = "/usr/sbin/vgs" +# +# ## The default location of the lvs binary can be overridden with: +# #lvs_binary = "/usr/sbin/lvs" + + +# # Gathers metrics from the /3.0/reports MailChimp API +# [[inputs.mailchimp]] +# ## MailChimp API key +# ## get from https://admin.mailchimp.com/account/api/ +# api_key = "" # required +# +# ## Reports for campaigns sent more than days_old ago will not be collected. +# ## 0 means collect all and is the default value. +# days_old = 0 +# +# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old +# # campaign_id = "" + + +# # Retrieves information on a specific host in a MarkLogic Cluster +# [[inputs.marklogic]] +# ## Base URL of the MarkLogic HTTP Server. +# url = "http://localhost:8002" +# +# ## List of specific hostnames to retrieve information. At least (1) required. +# # hosts = ["hostname1", "hostname2"] +# +# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges +# # username = "myuser" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many mcrouter servers. +# [[inputs.mcrouter]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. +# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" + + +# # Get kernel statistics from /proc/mdstat +# # This plugin ONLY supports Linux +# [[inputs.mdstat]] +# ## Sets file path +# ## If not specified, then default is /proc/mdstat +# # file_name = "/proc/mdstat" + + +# # Read metrics from one or many memcached servers. +# [[inputs.memcached]] +# # An array of address to gather stats about. Specify an ip on hostname +# # with optional port. ie localhost, 10.0.0.1:11211, etc. +# servers = ["localhost:11211"] +# # An array of unix memcached sockets to gather stats about. +# # unix_sockets = ["/var/run/memcached.sock"] +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true + + +# # Telegraf plugin for gathering metrics from N Mesos masters +# [[inputs.mesos]] +# ## Timeout, in ms. +# timeout = 100 +# +# ## A list of Mesos masters. +# masters = ["http://localhost:5050"] +# +# ## Master metrics groups to be collected, by default, all enabled. +# master_collections = [ +# "resources", +# "master", +# "system", +# "agents", +# "frameworks", +# "framework_offers", +# "tasks", +# "messages", +# "evqueue", +# "registrar", +# "allocator", +# ] +# +# ## A list of Mesos slaves, default is [] +# # slaves = [] +# +# ## Slave metrics groups to be collected, by default, all enabled. 
+# # slave_collections = [ +# # "resources", +# # "agent", +# # "system", +# # "executors", +# # "tasks", +# # "messages", +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collects scores from a Minecraft server's scoreboard using the RCON protocol +# [[inputs.minecraft]] +# ## Address of the Minecraft server. +# # server = "localhost" +# +# ## Server RCON Port. +# # port = "25575" +# +# ## Server RCON Password. +# password = "" +# +# ## Uncomment to remove deprecated metric components. +# # tagdrop = ["server"] + + +# # Generate metrics for test and demonstration purposes +# [[inputs.mock]] +# ## Set the metric name to use for reporting +# metric_name = "mock" +# +# ## Optional string key-value pairs of tags to add to all metrics +# # [inputs.mock.tags] +# # "key" = "value" +# +# ## One or more mock data fields *must* be defined. +# # [[inputs.mock.constant]] +# # name = "constant" +# # value = value_of_any_type +# # [[inputs.mock.random]] +# # name = "rand" +# # min = 1.0 +# # max = 6.0 +# # [[inputs.mock.sine_wave]] +# # name = "wave" +# # amplitude = 1.0 +# # period = 0.5 +# # [[inputs.mock.step]] +# # name = "plus_one" +# # start = 0.0 +# # step = 1.0 +# # [[inputs.mock.stock]] +# # name = "abc" +# # price = 50.00 +# # volatility = 0.2 + + +# # Retrieve data from MODBUS slave devices +# [[inputs.modbus]] +# ## Connection Configuration +# ## +# ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or +# ## via serial line communication in binary (RTU) or readable (ASCII) encoding +# ## +# ## Device name +# name = "Device" +# +# ## Slave ID - addresses a MODBUS device on the bus +# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] +# slave_id = 1 +# +# ## Timeout for each request +# timeout = "1s" +# +# ## Maximum number of retries and the time to wait between retries +# ## when a slave-device is busy. +# # busy_retries = 0 +# # busy_retries_wait = "100ms" +# +# # TCP - connect via Modbus/TCP +# controller = "tcp://localhost:502" +# +# ## Serial (RS485; RS232) +# ## For RS485 specific setting check the end of the configuration. +# ## For unix-like operating systems use: +# # controller = "file:///dev/ttyUSB0" +# ## For Windows operating systems use: +# # controller = "COM1" +# # baud_rate = 9600 +# # data_bits = 8 +# # parity = "N" +# # stop_bits = 1 +# +# ## Transmission mode for Modbus packets depending on the controller type. +# ## For Modbus over TCP you can choose between "TCP" , "RTUoverTCP" and +# ## "ASCIIoverTCP". +# ## For Serial controllers you can choose between "RTU" and "ASCII". +# ## By default this is set to "auto" selecting "TCP" for ModbusTCP connections +# ## and "RTU" for serial connections. +# # transmission_mode = "auto" +# +# ## Trace the connection to the modbus device as debug messages +# ## Note: You have to enable telegraf's debug mode to see those messages! 
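+# ## (Telegraf's debug mode can be enabled with the --debug command-line flag
+# ## or by setting debug = true in the [agent] section.)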
+# # debug_connection = false +# +# ## Define the configuration schema +# ## |---register -- define fields per register type in the original style (only supports one slave ID) +# ## |---request -- define fields on a requests base +# ## |---metric -- define fields on a metric base +# configuration_type = "register" +# ## --- "register" configuration style --- +# +# ## Measurements +# ## +# +# ## Digital Variables, Discrete Inputs and Coils +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## data_type - the (optional) output type, can be BOOL or UINT16 (default) +# ## address - variable address +# +# discrete_inputs = [ +# { name = "start", address = [0]}, +# { name = "stop", address = [1]}, +# { name = "reset", address = [2]}, +# { name = "emergency_stop", address = [3]}, +# ] +# coils = [ +# { name = "motor1_run", address = [0]}, +# { name = "motor1_jog", address = [1]}, +# { name = "motor1_stop", address = [2]}, +# ] +# +# ## Analog Variables, Input Registers and Holding Registers +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## byte_order - the ordering of bytes +# ## |---AB, ABCD - Big Endian +# ## |---BA, DCBA - Little Endian +# ## |---BADC - Mid-Big Endian +# ## |---CDAB - Mid-Little Endian +# ## data_type - INT8L, INT8H, UINT8L, UINT8H (low and high byte variants) +# ## INT16, UINT16, INT32, UINT32, INT64, UINT64, +# ## FLOAT16-IEEE, FLOAT32-IEEE, FLOAT64-IEEE (IEEE 754 binary representation) +# ## FIXED, UFIXED (fixed-point representation on input) +# ## FLOAT32 is a deprecated alias for UFIXED for historic reasons, should be avoided +# ## scale - the final numeric variable representation +# ## address - variable address +# +# holding_registers = [ +# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, +# { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]}, +# { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]}, +# { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, +# { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, +# { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]}, +# ] +# input_registers = [ +# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, +# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, +# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, +# ] +# +# ## --- "request" configuration style --- +# +# ## Per request definition +# ## +# +# ## Define a request sent to the device +# ## Multiple of those requests can be defined. Data will be collated into metrics at the end of data collection. +# [[inputs.modbus.request]] +# ## ID of the modbus slave device to query. +# ## If you need to query multiple slave-devices, create several "request" definitions. +# slave_id = 1 +# +# ## Byte order of the data. +# ## |---ABCD -- Big Endian (Motorola) +# ## |---DCBA -- Little Endian (Intel) +# ## |---BADC -- Big Endian with byte swap +# ## |---CDAB -- Little Endian with byte swap +# byte_order = "ABCD" +# +# ## Type of the register for the request +# ## Can be "coil", "discrete", "holding" or "input" +# register = "coil" +# +# ## Name of the measurement. +# ## Can be overriden by the individual field definitions. 
Defaults to "modbus" +# # measurement = "modbus" +# +# ## Request optimization algorithm. +# ## |---none -- Do not perform any optimization and use the given layout(default) +# ## |---shrink -- Shrink requests to actually requested fields +# ## | by stripping leading and trailing omits +# ## |---rearrange -- Rearrange request boundaries within consecutive address ranges +# ## | to reduce the number of requested registers by keeping +# ## | the number of requests. +# ## |---max_insert -- Rearrange request keeping the number of extra fields below the value +# ## provided in "optimization_max_register_fill". It is not necessary to define 'omitted' +# ## fields as the optimisation will add such field only where needed. +# # optimization = "none" +# +# ## Maximum number register the optimizer is allowed to insert between two fields to +# ## save requests. +# ## This option is only used for the 'max_insert' optimization strategy. +# ## NOTE: All omitted fields are ignored, so this option denotes the effective hole +# ## size to fill. +# # optimization_max_register_fill = 50 +# +# ## Field definitions +# ## Analog Variables, Input Registers and Holding Registers +# ## address - address of the register to query. For coil and discrete inputs this is the bit address. +# ## name *1 - field name +# ## type *1,2 - type of the modbus field, can be +# ## INT8L, INT8H, UINT8L, UINT8H (low and high byte variants) +# ## INT16, UINT16, INT32, UINT32, INT64, UINT64 and +# ## FLOAT16, FLOAT32, FLOAT64 (IEEE 754 binary representation) +# ## scale *1,2 - (optional) factor to scale the variable with +# ## output *1,3 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if +# ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). +# ## measurement *1 - (optional) measurement name, defaults to the setting of the request +# ## omit - (optional) omit this field. Useful to leave out single values when querying many registers +# ## with a single request. Defaults to "false". +# ## +# ## *1: These fields are ignored if field is omitted ("omit"=true) +# ## *2: These fields are ignored for both "coil" and "discrete"-input type of registers. +# ## *3: This field can only be "UINT16" or "BOOL" if specified for both "coil" +# ## and "discrete"-input type of registers. By default the fields are +# ## output as zero or one in UINT16 format unless "BOOL" is used. 
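+# ##
+# ## Worked example of the scaling rules above (illustrative values, not part
+# ## of the upstream sample): a register pair read with type="UINT32",
+# ## byte_order="ABCD" and scale=0.001 that contains the raw value 12345 is
+# ## emitted as the FLOAT64 field value 12.345, because providing "scale"
+# ## switches the default output type to FLOAT64.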
+# +# ## Coil / discrete input example +# fields = [ +# { address=0, name="motor1_run"}, +# { address=1, name="jog", measurement="motor"}, +# { address=2, name="motor1_stop", omit=true}, +# { address=3, name="motor1_overheating", output="BOOL"}, +# ] +# +# [inputs.modbus.request.tags] +# machine = "impresser" +# location = "main building" +# +# [[inputs.modbus.request]] +# ## Holding example +# ## All of those examples will result in FLOAT64 field outputs +# slave_id = 1 +# byte_order = "DCBA" +# register = "holding" +# fields = [ +# { address=0, name="voltage", type="INT16", scale=0.1 }, +# { address=1, name="current", type="INT32", scale=0.001 }, +# { address=3, name="power", type="UINT32", omit=true }, +# { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" }, +# { address=7, name="frequency", type="UINT32", scale=0.1 }, +# { address=8, name="power_factor", type="INT64", scale=0.01 }, +# ] +# +# [inputs.modbus.request.tags] +# machine = "impresser" +# location = "main building" +# +# [[inputs.modbus.request]] +# ## Input example with type conversions +# slave_id = 1 +# byte_order = "ABCD" +# register = "input" +# fields = [ +# { address=0, name="rpm", type="INT16" }, # will result in INT64 field +# { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field +# { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field +# { address=4, name="hours", type="UINT32" }, # will result in UIN64 field +# ] +# +# [inputs.modbus.request.tags] +# machine = "impresser" +# location = "main building" +# +# ## --- "metric" configuration style --- +# +# ## Per metric definition +# ## +# +# ## Request optimization algorithm across metrics +# ## |---none -- Do not perform any optimization and just group requests +# ## | within metrics (default) +# ## |---max_insert -- Collate registers across all defined metrics and fill in +# ## holes to optimize the number of requests. +# # optimization = "none" +# +# ## Maximum number of registers the optimizer is allowed to insert between +# ## non-consecutive registers to save requests. +# ## This option is only used for the 'max_insert' optimization strategy and +# ## effectively denotes the hole size between registers to fill. +# # optimization_max_register_fill = 50 +# +# ## Define a metric produced by the requests to the device +# ## Multiple of those metrics can be defined. The referenced registers will +# ## be collated into requests send to the device +# [[inputs.modbus.metric]] +# ## ID of the modbus slave device to query +# ## If you need to query multiple slave-devices, create several "metric" definitions. +# slave_id = 1 +# +# ## Byte order of the data +# ## |---ABCD -- Big Endian (Motorola) +# ## |---DCBA -- Little Endian (Intel) +# ## |---BADC -- Big Endian with byte swap +# ## |---CDAB -- Little Endian with byte swap +# # byte_order = "ABCD" +# +# ## Name of the measurement +# # measurement = "modbus" +# +# ## Field definitions +# ## register - type of the modbus register, can be "coil", "discrete", +# ## "holding" or "input". Defaults to "holding". +# ## address - address of the register to query. For coil and discrete inputs this is the bit address. 
+# ## name - field name +# ## type *1 - type of the modbus field, can be +# ## INT8L, INT8H, UINT8L, UINT8H (low and high byte variants) +# ## INT16, UINT16, INT32, UINT32, INT64, UINT64 and +# ## FLOAT16, FLOAT32, FLOAT64 (IEEE 754 binary representation) +# ## scale *1 - (optional) factor to scale the variable with +# ## output *2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if +# ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). +# ## +# ## *1: These fields are ignored for both "coil" and "discrete"-input type of registers. +# ## *2: This field can only be "UINT16" or "BOOL" if specified for both "coil" +# ## and "discrete"-input type of registers. By default the fields are +# ## output as zero or one in UINT16 format unless "BOOL" is used. +# fields = [ +# { register="coil", address=0, name="door_open"}, +# { register="coil", address=1, name="status_ok"}, +# { register="holding", address=0, name="voltage", type="INT16" }, +# { address=1, name="current", type="INT32", scale=0.001 }, +# { address=5, name="energy", type="FLOAT32", scale=0.001,}, +# { address=7, name="frequency", type="UINT32", scale=0.1 }, +# { address=8, name="power_factor", type="INT64", scale=0.01 }, +# ] +# +# ## Tags assigned to the metric +# # [inputs.modbus.metric.tags] +# # machine = "impresser" +# # location = "main building" +# +# +# ## RS485 specific settings. Only take effect for serial controllers. +# ## Note: This has to be at the end of the modbus configuration due to +# ## TOML constraints. +# # [inputs.modbus.rs485] +# ## Delay RTS prior to sending +# # delay_rts_before_send = "0ms" +# ## Delay RTS after to sending +# # delay_rts_after_send = "0ms" +# ## Pull RTS line to high during sending +# # rts_high_during_send = false +# ## Pull RTS line to high after sending +# # rts_high_after_send = false +# ## Enabling receiving (Rx) during transmission (Tx) +# # rx_during_tx = false +# +# ## Enable workarounds required by some devices to work correctly +# # [inputs.modbus.workarounds] +# ## Pause after connect delays the first request by the specified time. +# ## This might be necessary for (slow) devices. +# # pause_after_connect = "0ms" +# +# ## Pause between read requests sent to the device. +# ## This might be necessary for (slow) serial devices. +# # pause_between_requests = "0ms" +# +# ## Close the connection after every gather cycle. +# ## Usually the plugin closes the connection after a certain idle-timeout, +# ## however, if you query a device with limited simultaneous connectivity +# ## (e.g. serial devices) from multiple instances you might want to only +# ## stay connected during gather and disconnect afterwards. +# # close_connection_after_gather = false +# +# ## Force the plugin to read each field in a separate request. +# ## This might be necessary for devices not conforming to the spec, +# ## see https://github.com/influxdata/telegraf/issues/12071. +# # one_request_per_field = false +# +# ## Enforce the starting address to be zero for the first request on +# ## coil registers. 
This is necessary for some devices see +# ## https://github.com/influxdata/telegraf/issues/8905 + + +# # Read metrics and status information about processes managed by Monit +# [[inputs.monit]] +# ## Monit HTTPD address +# address = "http://127.0.0.1:2812" +# +# ## Username and Password for Monit +# # username = "" +# # password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Aggregates the contents of multiple files into a single point +# [[inputs.multifile]] +# ## Base directory where telegraf will look for files. +# ## Omit this option to use absolute paths. +# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" +# +# ## If true discard all data when a single file can't be read. +# ## Else, Telegraf omits the field generated from this file. +# # fail_early = true +# +# ## Files to parse each interval. +# [[inputs.multifile.file]] +# file = "in_pressure_input" +# dest = "pressure" +# conversion = "float" +# [[inputs.multifile.file]] +# file = "in_temp_input" +# dest = "temperature" +# conversion = "float(3)" +# [[inputs.multifile.file]] +# file = "in_humidityrelative_input" +# dest = "humidityrelative" +# conversion = "float(3)" + + +# # Read metrics from one or many mysql servers +# [[inputs.mysql]] +# ## specify servers via a url matching: +# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] +# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name +# ## e.g. +# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] +# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] +# # +# ## If no servers are specified, then localhost is used as the host. +# servers = ["tcp(127.0.0.1:3306)/"] +# +# ## Selects the metric output format. +# ## +# ## This option exists to maintain backwards compatibility, if you have +# ## existing metrics do not set or change this value until you are ready to +# ## migrate to the new format. +# ## +# ## If you do not have existing metrics from this plugin set to the latest +# ## version. 
+# ## +# ## Telegraf >=1.6: metric_version = 2 +# ## <1.6: metric_version = 1 (or unset) +# metric_version = 2 +# +# ## if the list is empty, then metrics are gathered from all database tables +# # table_schema_databases = [] +# +# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided +# ## in the list above +# # gather_table_schema = false +# +# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST +# # gather_process_list = false +# +# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS +# # gather_user_statistics = false +# +# ## gather auto_increment columns and max values from information schema +# # gather_info_schema_auto_inc = false +# +# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS +# # gather_innodb_metrics = false +# +# ## gather metrics from all channels from SHOW SLAVE STATUS command output +# # gather_all_slave_channels = false +# +# ## gather metrics from SHOW SLAVE STATUS command output +# # gather_slave_status = false +# +# ## use SHOW ALL SLAVES STATUS command output for MariaDB +# # mariadb_dialect = false +# +# ## gather metrics from SHOW BINARY LOGS command output +# # gather_binary_logs = false +# +# ## gather metrics from SHOW GLOBAL VARIABLES command output +# # gather_global_variables = true +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE +# # gather_table_io_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS +# # gather_table_lock_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE +# # gather_index_io_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS +# # gather_event_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME +# # gather_file_events_stats = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST +# # gather_perf_events_statements = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME +# # gather_perf_sum_per_acc_per_event = false +# # +# ## list of events to be gathered for gather_perf_sum_per_acc_per_event +# ## in case of empty list all events will be gathered +# # perf_summary_events = [] +# +# ## the limits for metrics form perf_events_statements +# # perf_events_statements_digest_text_limit = 120 +# # perf_events_statements_limit = 250 +# # perf_events_statements_time_limit = 86400 +# +# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) +# ## example: interval_slow = "30m" +# # interval_slow = "" +# +# ## Optional TLS Config (used if tls=custom parameter specified in server uri) +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Provides metrics about the state of a NATS server +# # This plugin does NOT support FreeBSD +# [[inputs.nats]] +# ## The address of the monitoring endpoint of the NATS server +# server = "http://localhost:8222" +# +# ## Maximum time to receive response +# # response_timeout = "5s" + + +# # Neptune Apex data collector +# [[inputs.neptune_apex]] +# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. +# ## Measurements will be logged under "apex". +# +# ## The base URL of the local Apex(es). If you specify more than one server, they will +# ## be differentiated by the "source" tag. 
+# servers = [ +# "http://apex.local", +# ] +# +# ## The response_timeout specifies how long to wait for a reply from the Apex. +# #response_timeout = "5s" +# + + +# # Gather metrics about network interfaces +# [[inputs.net]] +# ## By default, telegraf gathers stats from any up interface (excluding loopback) +# ## Setting interfaces will tell it to gather these explicit interfaces, +# ## regardless of status. When specifying an interface, glob-style +# ## patterns are also supported. +# ## +# # interfaces = ["eth*", "enp0s[0-1]", "lo"] +# ## +# ## On linux systems telegraf also collects protocol stats. +# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. +# ## +# ## DEPRECATION NOTICE: A value of 'false' is deprecated and discouraged! +# ## Please set this to `true` and use the 'inputs.nstat' +# ## plugin instead. +# # ignore_protocol_stats = false + + +# # Collect response time of a TCP or UDP connection +# [[inputs.net_response]] +# ## Protocol, must be "tcp" or "udp" +# ## NOTE: because the "udp" protocol does not respond to requests, it requires +# ## a send/expect string pair (see below). +# protocol = "tcp" +# ## Server address (default localhost) +# address = "localhost:80" +# +# ## Set timeout +# # timeout = "1s" +# +# ## Set read timeout (only used if expecting a response) +# # read_timeout = "1s" +# +# ## The following options are required for UDP checks. For TCP, they are +# ## optional. The plugin will send the given string to the server and then +# ## expect to receive the given 'expect' string back. +# ## string sent to the server +# # send = "ssh" +# ## expected string in answer +# # expect = "ssh" +# +# ## Uncomment to remove deprecated fields; recommended for new deploys +# # fielddrop = ["result_type", "string_found"] + + +# # Read TCP metrics such as established, time wait and sockets counts. +# [[inputs.netstat]] +# # no configuration + + +# # Read per-mount NFS client metrics from /proc/self/mountstats +# [[inputs.nfsclient]] +# ## Read more low-level metrics (optional, defaults to false) +# # fullstat = false +# +# ## List of mounts to explictly include or exclude (optional) +# ## The pattern (Go regexp) is matched against the mount point (not the +# ## device being mounted). If include_mounts is set, all mounts are ignored +# ## unless present in the list. If a mount is listed in both include_mounts +# ## and exclude_mounts, it is excluded. Go regexp patterns can be used. +# # include_mounts = [] +# # exclude_mounts = [] +# +# ## List of operations to include or exclude from collecting. This applies +# ## only when fullstat=true. Symantics are similar to {include,exclude}_mounts: +# ## the default is to collect everything; when include_operations is set, only +# ## those OPs are collected; when exclude_operations is set, all are collected +# ## except those listed. If include and exclude are set, the OP is excluded. +# ## See /proc/self/mountstats for a list of valid operations; note that +# ## NFSv3 and NFSv4 have different lists. While it is not possible to +# ## have different include/exclude lists for NFSv3/4, unused elements +# ## in the list should be okay. It is possible to have different lists +# ## for different mountpoints: use mulitple [[input.nfsclient]] stanzas, +# ## with their own lists. See "include_mounts" above, and be careful of +# ## duplicate metrics. 
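+# ## For example (illustrative only), setting fullstat = true together with
+# ## include_operations = ["READ", "WRITE"] would restrict collection to the
+# ## read/write operation counters; use the exact operation names that appear
+# ## in /proc/self/mountstats on the host.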
+# # include_operations = [] +# # exclude_operations = [] + + +# # Read Nginx's basic status information (ngx_http_stub_status_module) +# [[inputs.nginx]] +# ## An array of Nginx stub_status URI to gather stats. +# urls = ["http://localhost/server_status"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Read Nginx Plus' advanced status information +# [[inputs.nginx_plus]] +# ## An array of Nginx status URIs to gather stats. +# urls = ["http://localhost/status"] +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx Plus API advanced status information +# [[inputs.nginx_plus_api]] +# ## An array of Nginx API URIs to gather stats. +# urls = ["http://localhost/api"] +# # Nginx API version, default: 3 +# # api_version = 3 +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-sts) +# [[inputs.nginx_sts]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +# [[inputs.nginx_upstream_check]] +# ## An URL where Nginx Upstream check module is enabled +# ## It should be set to return a JSON formatted response +# url = "http://127.0.0.1/status?format=json" +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "check.example.com" +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-vts) +# [[inputs.nginx_vts]] +# ## An array of ngx_http_status_module or status URI to gather stats. 
+# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the Nomad API +# [[inputs.nomad]] +# ## URL for the Nomad agent +# # url = "http://127.0.0.1:4646" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + +# # A plugin to collect stats from the NSD DNS resolver +# [[inputs.nsd]] +# ## Address of server to connect to, optionally ':port'. Defaults to the +# ## address in the nsd config file. +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the nsd-control binary can be overridden with: +# # binary = "/usr/sbin/nsd-control" +# +# ## The default location of the nsd config file can be overridden with: +# # config_file = "/etc/nsd/nsd.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" + + +# # Read NSQ topic and channel statistics. +# [[inputs.nsq]] +# ## An array of NSQD HTTP API endpoints +# endpoints = ["http://localhost:4151"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collect kernel snmp counters and network interface statistics +# [[inputs.nstat]] +# ## file paths for proc files. If empty default paths will be used: +# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 +# ## These can also be overridden with env variables, see README. +# proc_net_netstat = "/proc/net/netstat" +# proc_net_snmp = "/proc/net/snmp" +# proc_net_snmp6 = "/proc/net/snmp6" +# ## dump metrics with 0 values too +# dump_zeros = true + + +# # Get standard NTP query metrics, requires ntpq executable. +# [[inputs.ntpq]] +# ## Servers to query with ntpq. +# ## If no server is given, the local machine is queried. +# # servers = [] +# +# ## If false, set the -n ntpq flag. Can reduce metric gather time. +# ## DEPRECATED since 1.24.0: add '-n' to 'options' instead to skip DNS lookup +# # dns_lookup = true +# +# ## Options to pass to the ntpq command. +# # options = "-p" +# +# ## Output format for the 'reach' field. +# ## Available values are +# ## octal -- output as is in octal representation e.g. 377 (default) +# ## decimal -- convert value to decimal representation e.g. 371 -> 249 +# ## count -- count the number of bits in the value. This represents +# ## the number of successful reaches, e.g. 37 -> 5 +# ## ratio -- output the ratio of successful attempts e.g. 
37 -> 5/8 = 0.625 +# # reach_format = "octal" + + +# # Pulls statistics from nvidia GPUs attached to the host +# [[inputs.nvidia_smi]] +# ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" +# ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), +# ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned +# # bin_path = "/usr/bin/nvidia-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + +# # Retrieve data from OPCUA devices +# [[inputs.opcua]] +# ## Metric name +# # name = "opcua" +# # +# ## OPC UA Endpoint URL +# # endpoint = "opc.tcp://localhost:4840" +# # +# ## Maximum time allowed to establish a connect to the endpoint. +# # connect_timeout = "10s" +# # +# ## Maximum time allowed for a request over the established connection. +# # request_timeout = "5s" +# # +# ## Security policy, one of "None", "Basic128Rsa15", "Basic256", +# ## "Basic256Sha256", or "auto" +# # security_policy = "auto" +# # +# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto" +# # security_mode = "auto" +# # +# ## Path to cert.pem. Required when security mode or policy isn't "None". +# ## If cert path is not supplied, self-signed cert and key will be generated. +# # certificate = "/etc/telegraf/cert.pem" +# # +# ## Path to private key.pem. Required when security mode or policy isn't "None". +# ## If key path is not supplied, self-signed cert and key will be generated. +# # private_key = "/etc/telegraf/key.pem" +# # +# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To +# ## authenticate using a specific ID, select 'Certificate' or 'UserName' +# # auth_method = "Anonymous" +# # +# ## Username. Required for auth_method = "UserName" +# # username = "" +# # +# ## Password. Required for auth_method = "UserName" +# # password = "" +# # +# ## Option to select the metric timestamp to use. Valid options are: +# ## "gather" -- uses the time of receiving the data in telegraf +# ## "server" -- uses the timestamp provided by the server +# ## "source" -- uses the timestamp provided by the source +# # timestamp = "gather" +# # +# ## Node ID configuration +# ## name - field name to use in the output +# ## namespace - OPC UA namespace of the node (integer value 0 thru 3) +# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) +# ## identifier - OPC UA ID (tag as shown in opcua browser) +# ## tags - extra tags to be added to the output metric (optional); deprecated in 1.25.0; use default_tags +# ## default_tags - extra tags to be added to the output metric (optional) +# ## +# ## Use either the inline notation or the bracketed notation, not both. +# # +# ## Inline notation (default_tags not supported yet) +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier="", tags=[["tag1", "value1"], ["tag2", "value2"]}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# # ] +# # +# ## Bracketed notation +# # [[inputs.opcua.nodes]] +# # name = "node1" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# # default_tags = { tag1 = "value1", tag2 = "value2" } +# # +# # [[inputs.opcua.nodes]] +# # name = "node2" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# # +# ## Node Group +# ## Sets defaults so they aren't required in every node. 
+# ## Default values can be set for: +# ## * Metric name +# ## * OPC UA namespace +# ## * Identifier +# ## * Default tags +# ## +# ## Multiple node groups are allowed +# #[[inputs.opcua.group]] +# ## Group Metric name. Overrides the top level name. If unset, the +# ## top level name is used. +# # name = +# # +# ## Group default namespace. If a node in the group doesn't set its +# ## namespace, this is used. +# # namespace = +# # +# ## Group default identifier type. If a node in the group doesn't set its +# ## namespace, this is used. +# # identifier_type = +# # +# ## Default tags that are applied to every node in this group. Can be +# ## overwritten in a node by setting a different value for the tag name. +# ## example: default_tags = { tag1 = "value1" } +# # default_tags = {} +# # +# ## Node ID Configuration. Array of nodes with the same settings as above. +# ## Use either the inline notation or the bracketed notation, not both. +# # +# ## Inline notation (default_tags not supported yet) +# # nodes = [ +# # {name="node1", namespace="", identifier_type="", identifier=""}, +# # {name="node2", namespace="", identifier_type="", identifier=""}, +# #] +# # +# ## Bracketed notation +# # [[inputs.opcua.group.nodes]] +# # name = "node1" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# # default_tags = { tag1 = "override1", tag2 = "value2" } +# # +# # [[inputs.opcua.group.nodes]] +# # name = "node2" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# +# ## Enable workarounds required by some devices to work correctly +# # [inputs.opcua.workarounds] +# ## Set additional valid status codes, StatusOK (0x0) is always considered valid +# # additional_valid_status_codes = ["0xC0"] +# +# # [inputs.opcua.request_workarounds] +# ## Use unregistered reads instead of registered reads +# # use_unregistered_reads = false + + +# # OpenLDAP cn=Monitor plugin +# [[inputs.openldap]] +# host = "localhost" +# port = 389 +# +# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. +# # note that port will likely need to be changed to 636 for ldaps +# # valid options: "" | "starttls" | "ldaps" +# tls = "" +# +# # skip peer certificate verification. Default is false. +# insecure_skip_verify = false +# +# # Path to PEM-encoded Root certificate to use to verify server certificate +# tls_ca = "/etc/ssl/certs.pem" +# +# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed. +# bind_dn = "" +# bind_password = "" +# +# # reverse metric names so they sort more naturally +# # Defaults to false if unset, but is set to true when generating a new config +# reverse_metric_names = true + + +# # Get standard NTP query metrics from OpenNTPD. +# [[inputs.openntpd]] +# ## Run ntpctl binary with sudo. +# # use_sudo = false +# +# ## Location of the ntpctl binary. +# # binary = "/usr/sbin/ntpctl" +# +# ## Maximum time the ntpctl binary is allowed to run. +# # timeout = "5ms" + + +# # Derive metrics from aggregating OpenSearch query results +# [[inputs.opensearch_query]] +# ## OpenSearch cluster endpoint(s). Multiple urls can be specified as part +# ## of the same cluster. Only one succesful call will be made per interval. +# urls = [ "https://node1.os.example.com:9200" ] # required. +# +# ## OpenSearch client timeout, defaults to "5s". +# # timeout = "5s" +# +# ## HTTP basic authentication details +# # username = "admin" +# # password = "admin" +# +# ## Skip TLS validation. Useful for local testing and self-signed certs. 
+# # insecure_skip_verify = false
+#
+# [[inputs.opensearch_query.aggregation]]
+# ## measurement name for the results of the aggregation query
+# measurement_name = "measurement"
+#
+# ## OpenSearch index or index pattern to search
+# index = "index-*"
+#
+# ## The date/time field in the OpenSearch index (mandatory).
+# date_field = "@timestamp"
+#
+# ## If the field used for the date/time field in OpenSearch is also using
+# ## a custom date/time format it may be required to provide the format to
+# ## correctly parse the field.
+# ##
+# ## If using one of the built in OpenSearch formats this is not required.
+# ## https://opensearch.org/docs/2.4/opensearch/supported-field-types/date/#built-in-formats
+# # date_field_custom_format = ""
+#
+# ## Time window to query (eg. "1m" to query documents from last minute).
+# ## Normally should be set to same as collection interval
+# query_period = "1m"
+#
+# ## Lucene query to filter results
+# # filter_query = "*"
+#
+# ## Fields to aggregate values (must be numeric fields)
+# # metric_fields = ["metric"]
+#
+# ## Aggregation function to use on the metric fields
+# ## Must be set if 'metric_fields' is set
+# ## Valid values are: avg, sum, min, max
+# # metric_function = "avg"
+#
+# ## Fields to be used as tags. Must be text, non-analyzed fields. Metric
+# ## aggregations are performed per tag
+# # tags = ["field.keyword", "field2.keyword"]
+#
+# ## Set to true to not ignore documents when the tag(s) above are missing
+# # include_missing_tag = false
+#
+# ## String value of the tag when the tag does not exist
+# ## Required when include_missing_tag is true
+# # missing_tag_value = "null"
+
+
+# # A plugin to collect stats from OpenSMTPD, a free SMTP server implementation
+# [[inputs.opensmtpd]]
+# ## If running as a restricted user you can prepend sudo for additional access:
+# #use_sudo = false
+#
+# ## The default location of the smtpctl binary can be overridden with:
+# binary = "/usr/sbin/smtpctl"
+#
+# # The default timeout of 1s can be overridden with:
+# #timeout = "1s"
+
+
+# # Read current weather and forecasts data from openweathermap.org
+# [[inputs.openweathermap]]
+# ## OpenWeatherMap API key.
+# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+#
+# ## City IDs to collect weather data from.
+# city_id = ["5391959"]
+#
+# ## Language of the description field. Can be one of "ar", "bg",
+# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu",
+# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru",
+# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw"
+# # lang = "en"
+#
+# ## APIs to fetch; can contain "weather" or "forecast".
+# fetch = ["weather", "forecast"]
+#
+# ## OpenWeatherMap base URL
+# # base_url = "https://api.openweathermap.org/"
+#
+# ## Timeout for HTTP response.
+# # response_timeout = "5s"
+#
+# ## Preferred unit system for temperature and wind speed. Can be one of
+# ## "metric", "imperial", or "standard".
+# # units = "metric"
+#
+# ## Query interval; OpenWeatherMap weather data is updated every 10
+# ## minutes.
+# interval = "10m"
+
+
+# # P4Runtime telemetry input plugin
+# [[inputs.p4runtime]]
+# ## Define the endpoint of P4Runtime gRPC server to collect metrics.
+# # endpoint = "127.0.0.1:9559"
+# ## Set DeviceID required for Client Arbitration.
+# ## https://p4.org/p4-spec/p4runtime/main/P4Runtime-Spec.html#sec-client-arbitration-and-controller-replication
+# # device_id = 1
+# ## Filter counters by their names that should be observed.
+# ## Example: counter_names_include=["ingressCounter", "egressCounter"] +# # counter_names_include = [] +# +# ## Optional TLS Config. +# ## Enable client-side TLS and define CA to authenticate the device. +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.crt" +# ## Set minimal TLS version to accept by the client. +# # tls_min_version = "TLS12" +# ## Use TLS but skip chain & host verification. +# # insecure_skip_verify = true +# +# ## Define client-side TLS certificate & key to authenticate to the device. +# # tls_cert = "/etc/telegraf/client.crt" +# # tls_key = "/etc/telegraf/client.key" + + +# # Read metrics of passenger using passenger-status +# [[inputs.passenger]] +# ## Path of passenger-status. +# ## +# ## Plugin gather metric via parsing XML output of passenger-status +# ## More information about the tool: +# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html +# ## +# ## If no path is specified, then the plugin simply execute passenger-status +# ## hopefully it can be found in your PATH +# command = "passenger-status -v --show=xml" + + +# # Gather counters from PF +# [[inputs.pf]] +# ## PF require root access on most systems. +# ## Setting 'use_sudo' to true will make use of sudo to run pfctl. +# ## Users must configure sudo to allow telegraf user to run pfctl with no password. +# ## pfctl can be restricted to only list command "pfctl -s info". +# use_sudo = false + + +# # Read metrics of phpfpm, via HTTP status page or socket +# [[inputs.phpfpm]] +# ## An array of addresses to gather stats about. Specify an ip or hostname +# ## with optional port and path +# ## +# ## Plugin can be configured in three modes (either can be used): +# ## - http: the URL must start with http:// or https://, ie: +# ## "http://localhost/status" +# ## "http://192.168.130.1/status?full" +# ## +# ## - unixsocket: path to fpm socket, ie: +# ## "/var/run/php5-fpm.sock" +# ## or using a custom fpm status path: +# ## "/var/run/php5-fpm.sock:fpm-custom-status-path" +# ## glob patterns are also supported: +# ## "/var/run/php*.sock" +# ## +# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: +# ## "fcgi://10.0.0.12:9000/status" +# ## "cgi://10.0.10.12:9001/status" +# ## +# ## Example of multiple gathering from local socket and remote host +# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] +# urls = ["http://localhost/status"] +# +# ## Duration allowed to complete HTTP requests. +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Ping given url(s) and return statistics +# [[inputs.ping]] +# ## Hosts to send ping packets to. +# urls = ["example.org"] +# +# ## Method used for sending pings, can be either "exec" or "native". When set +# ## to "exec" the systems ping command will be executed. When set to "native" +# ## the plugin will send pings directly. +# ## +# ## While the default is "exec" for backwards compatibility, new deployments +# ## are encouraged to use the "native" method for improved compatibility and +# ## performance. +# # method = "exec" +# +# ## Number of ping packets to send per interval. Corresponds to the "-c" +# ## option of the ping command. +# # count = 1 +# +# ## Time to wait between sending ping packets in seconds. Operates like the +# ## "-i" option of the ping command. 
+# # ping_interval = 1.0 +# +# ## If set, the time to wait for a ping response in seconds. Operates like +# ## the "-W" option of the ping command. +# # timeout = 1.0 +# +# ## If set, the total ping deadline, in seconds. Operates like the -w option +# ## of the ping command. +# # deadline = 10 +# +# ## Interface or source address to send ping from. Operates like the -I or -S +# ## option of the ping command. +# # interface = "" +# +# ## Percentiles to calculate. This only works with the native method. +# # percentiles = [50, 95, 99] +# +# ## Specify the ping executable binary. +# # binary = "ping" +# +# ## Arguments for ping command. When arguments is not empty, the command from +# ## the binary option will be used and other options (ping_interval, timeout, +# ## etc) will be ignored. +# # arguments = ["-c", "3"] +# +# ## Use only IPv6 addresses when resolving a hostname. +# # ipv6 = false +# +# ## Number of data bytes to be sent. Corresponds to the "-s" +# ## option of the ping command. This only works with the native method. +# # size = 56 + + +# # Measure postfix queue statistics +# # This plugin ONLY supports non-Windows +# [[inputs.postfix]] +# ## Postfix queue directory. If not provided, telegraf will try to use +# ## 'postconf -h queue_directory' to determine it. +# # queue_directory = "/var/spool/postfix" + + +# # Read metrics from one or many PowerDNS servers +# [[inputs.powerdns]] +# # An array of sockets to gather stats about. +# # Specify a path to unix socket. +# # +# # If no servers are specified, then '/var/run/pdns.controlsocket' is used as the path. +# unix_sockets = ["/var/run/pdns.controlsocket"] + + +# # Read metrics from one or many PowerDNS Recursor servers +# [[inputs.powerdns_recursor]] +# ## Path to the Recursor control socket. +# unix_sockets = ["/var/run/pdns_recursor.controlsocket"] +# +# ## Directory to create receive socket. This default is likely not writable, +# ## please reference the full plugin documentation for a recommended setup. +# # socket_dir = "/var/run/" +# ## Socket permissions for the receive socket. +# # socket_mode = "0666" +# +# ## The version of the PowerDNS control protocol to use. You will have to +# ## change this based on your PowerDNS Recursor version, see below: +# ## Version 1: PowerDNS <4.5.0 +# ## Version 2: PowerDNS 4.5.0 - 4.5.11 +# ## Version 3: PowerDNS >=4.6.0 +# ## By default this is set to 1. +# # control_protocol_version = 1 +# + + +# # Monitor process cpu and memory usage +# [[inputs.procstat]] +# ## PID file to monitor process +# pid_file = "/var/run/nginx.pid" +# ## executable name (ie, pgrep ) +# # exe = "nginx" +# ## pattern as argument for pgrep (ie, pgrep -f ) +# # pattern = "nginx" +# ## user as argument for pgrep (ie, pgrep -u ) +# # user = "nginx" +# ## Systemd unit name, supports globs when include_systemd_children is set to true +# # systemd_unit = "nginx.service" +# # include_systemd_children = false +# ## CGroup name or path, supports globs +# # cgroup = "systemd/system.slice/nginx.service" +# +# ## Windows service name +# # win_service = "" +# +# ## override for process_name +# ## This is optional; default is sourced from /proc//status +# # process_name = "bar" +# +# ## Field name prefix +# # prefix = "" +# +# ## When true add the full cmdline as a tag. +# # cmdline_tag = false +# +# ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. +# # mode = "irix" +# +# ## Add the PID as a tag instead of as a field. 
When collecting multiple +# ## processes with otherwise matching tags this setting should be enabled to +# ## ensure each process has a unique identity. +# ## +# ## Enabling this option may result in a large number of series, especially +# ## when processes have a short lifetime. +# # pid_tag = false +# +# ## Method to use when finding process IDs. Can be one of 'pgrep', or +# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while +# ## the native finder performs the search directly in a manor dependent on the +# ## platform. Default is 'pgrep' +# # pid_finder = "pgrep" + + +# # Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2). +# [[inputs.proxmox]] +# ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. +# base_url = "https://localhost:8006/api2/json" +# api_token = "USER@REALM!TOKENID=UUID" +# +# ## Node name, defaults to OS hostname +# ## Unless Telegraf is on the same host as Proxmox, setting this is required +# ## for Telegraf to successfully connect to Proxmox. If not on the same host, +# ## leaving this empty will often lead to a "search domain is not set" error. +# # node_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Reads last_run_summary.yaml file and converts to measurements +# [[inputs.puppetagent]] +# ## Location of puppet last run summary file +# location = "/var/lib/puppet/state/last_run_summary.yaml" + + +# # Reads metrics from RabbitMQ servers via the Management Plugin +# [[inputs.rabbitmq]] +# ## Management Plugin url. (default: http://localhost:15672) +# # url = "http://localhost:15672" +# ## Tag added to rabbitmq_overview series; deprecated: use tags +# # name = "rmq-server-1" +# ## Credentials +# # username = "guest" +# # password = "guest" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional request timeouts +# ## +# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## A list of nodes to gather as the rabbitmq_node measurement. If not +# ## specified, metrics for all nodes are gathered. +# # nodes = ["rabbit@node1", "rabbit@node2"] +# +# ## A list of queues to gather as the rabbitmq_queue measurement. If not +# ## specified, metrics for all queues are gathered. +# ## Deprecated in 1.6: Use queue_name_include instead. +# # queues = ["telegraf"] +# +# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not +# ## specified, metrics for all exchanges are gathered. +# # exchanges = ["telegraf"] +# +# ## Metrics to include and exclude. Globs accepted. 
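+# ## For example, metric_include = ["queue", "node"] would limit collection to
+# ## queue and node metrics (an illustrative subset of the supported names
+# ## listed below).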
+# ## Note that an empty array for both will include all metrics +# ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" +# # metric_include = [] +# # metric_exclude = [] +# +# ## Queues to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all queues +# # queue_name_include = [] +# # queue_name_exclude = [] +# +# ## Federation upstreams to include and exclude specified as an array of glob +# ## pattern strings. Federation links can also be limited by the queue and +# ## exchange filters. +# # federation_upstream_include = [] +# # federation_upstream_exclude = [] + + +# [[inputs.radius]] +# ## An array of Server IPs and ports to gather from. If none specified, defaults to localhost. +# servers = ["127.0.0.1:1812","hostname.domain.com:1812"] +# +# ## Credentials for radius authentication. +# username = "myuser" +# password = "mypassword" +# secret = "mysecret" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" + + +# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +# [[inputs.raindrops]] +# ## An array of raindrops middleware URI to gather stats. +# urls = ["http://localhost:8080/_raindrops"] + + +# # Reads metrics from RavenDB servers via the Monitoring Endpoints +# [[inputs.ravendb]] +# ## Node URL and port that RavenDB is listening on. By default, +# ## attempts to connect securely over HTTPS, however, if the user +# ## is running a local unsecure development cluster users can use +# ## HTTP via a URL like "http://localhost:8080" +# url = "https://localhost:4433" +# +# ## RavenDB X509 client certificate setup +# # tls_cert = "/etc/telegraf/raven.crt" +# # tls_key = "/etc/telegraf/raven.key" +# +# ## Optional request timeout +# ## +# ## Timeout, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request and +# ## time limit for requests made by this client +# # timeout = "5s" +# +# ## List of statistics which are collected +# # At least one is required +# # Allowed values: server, databases, indexes, collections +# # +# # stats_include = ["server", "databases", "indexes", "collections"] +# +# ## List of db where database stats are collected +# ## If empty, all db are concerned +# # db_stats_dbs = [] +# +# ## List of db where index status are collected +# ## If empty, all indexes from all db are concerned +# # index_stats_dbs = [] +# +# ## List of db where collection status are collected +# ## If empty, all collections from all db are concerned +# # collection_stats_dbs = [] + + +# # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs +# [[inputs.redfish]] +# ## Redfish API Base URL. +# address = "https://127.0.0.1:5000" +# +# ## Credentials for the Redfish API. +# username = "root" +# password = "password123456" +# +# ## System Id to collect data for in Redfish APIs. +# computer_system_id="System.Embedded.1" +# +# ## Tag sets allow you to include redfish OData link parent data +# ## For Example. +# ## Thermal data is an OData link with parent Chassis which has a link of Location. +# ## For more info see the Redfish Resource and Schema Guide at DMTFs website. 
+# ## Available sets are: "chassis.location" and "chassis" +# # include_tag_sets = ["chassis.location"] +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many redis-sentinel servers +# [[inputs.redis_sentinel]] +# ## specify servers via a url matching: +# ## [protocol://][username:password]@address[:port] +# ## e.g. +# ## tcp://localhost:26379 +# ## tcp://username:password@192.168.99.100 +# ## unix:///var/run/redis-sentinel.sock +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 26379 is used +# # servers = ["tcp://localhost:26379"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Read metrics from one or many RethinkDB servers +# [[inputs.rethinkdb]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port add password. ie, +# ## rethinkdb://user:auth_key@10.10.3.30:28105, +# ## rethinkdb://10.10.3.33:18832, +# ## 10.0.0.1:10000, etc. +# servers = ["127.0.0.1:28015"] +# +# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization, +# ## protocol have to be named "rethinkdb2" - it will use 1_0 H. +# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"] +# +# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol +# ## have to be named "rethinkdb". +# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] + + +# # Read metrics one or many Riak servers +# [[inputs.riak]] +# # Specify a list of one or more riak http servers +# servers = ["http://localhost:8098"] + + +# # Read API usage and limits for a Salesforce organisation +# [[inputs.salesforce]] +# ## specify your credentials +# ## +# username = "your_username" +# password = "your_password" +# ## +# ## (optional) security token +# # security_token = "your_security_token" +# ## +# ## (optional) environment type (sandbox or production) +# ## default is: production +# ## +# # environment = "production" +# ## +# ## (optional) API version (default: "39.0") +# ## +# # version = "39.0" + + +# # Monitor sensors, requires lm-sensors package +# # This plugin ONLY supports Linux +# [[inputs.sensors]] +# ## Remove numbers from field names. +# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. +# # remove_numbers = true +# +# ## Timeout is the maximum amount of time that the sensors command can run. +# # timeout = "5s" + + +# # Get slab statistics from procfs +# # This plugin ONLY supports Linux +# [[inputs.slab]] +# # no configuration - please see the plugin's README for steps to configure +# # sudo properly + + +# # Read metrics from storage devices supporting S.M.A.R.T. +# [[inputs.smart]] +# ## Optionally specify the path to the smartctl executable +# # path_smartctl = "/usr/bin/smartctl" +# +# ## Optionally specify the path to the nvme-cli executable +# # path_nvme = "/usr/bin/nvme" +# +# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case +# ## ["auto-on"] - automatically find and enable additional vendor specific disk info +# ## ["vendor1", "vendor2", ...] 
- e.g. "Intel" enable additional Intel specific disk info +# # enable_extensions = ["auto-on"] +# +# ## On most platforms used cli utilities requires root access. +# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. +# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli +# ## without a password. +# # use_sudo = false +# +# ## Skip checking disks in this power mode. Defaults to +# ## "standby" to not wake up disks that have stopped rotating. +# ## See --nocheck in the man pages for smartctl. +# ## smartctl version 5.41 and 5.42 have faulty detection of +# ## power mode and might require changing this value to +# ## "never" depending on your disks. +# # nocheck = "standby" +# +# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed +# ## information from each drive into the 'smart_attribute' measurement. +# # attributes = false +# +# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. +# # excludes = [ "/dev/pass6" ] +# +# ## Optionally specify devices and device type, if unset +# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done +# ## and all found will be included except for the excluded in excludes. +# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] +# +# ## Timeout for the cli command to complete. +# # timeout = "30s" +# +# ## Optionally call smartctl and nvme-cli with a specific concurrency policy. +# ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes. +# ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of +# ## SMART data - one individual array drive at the time. In such case please set this configuration option +# ## to "sequential" to get readings for all drives. +# ## valid options: concurrent, sequential +# # read_method = "concurrent" + + +# # Retrieves SNMP values from remote agents +# [[inputs.snmp]] +# ## Agent addresses to retrieve values from. +# ## format: agents = [":"] +# ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. +# ## default is udp +# ## port: optional +# ## example: agents = ["udp://127.0.0.1:161"] +# ## agents = ["tcp://127.0.0.1:161"] +# ## agents = ["udp4://v4only-snmp-agent"] +# agents = ["udp://127.0.0.1:161"] +# +# ## Timeout for each request. +# # timeout = "5s" +# +# ## SNMP version; can be 1, 2, or 3. +# # version = 2 +# +# ## Unconnected UDP socket +# ## When true, SNMP reponses are accepted from any address not just +# ## the requested address. This can be useful when gathering from +# ## redundant/failover systems. +# # unconnected_udp_socket = false +# +# ## Path to mib files +# ## Used by the gosmi translator. +# ## To add paths when translating with netsnmp, use the MIBDIRS environment variable +# # path = ["/usr/share/snmp/mibs"] +# +# ## SNMP community string. +# # community = "public" +# +# ## Agent host tag +# # agent_host_tag = "agent_host" +# +# ## Number of retries to attempt. +# # retries = 3 +# +# ## The GETBULK max-repetitions parameter. +# # max_repetitions = 10 +# +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". 
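+# ## ("authPriv" requires the privacy options below in addition to the
+# ## authentication options above; "authNoPriv" needs only the authentication
+# ## options; "noAuthNoPriv" needs neither - a general SNMPv3 note, not a
+# ## plugin-specific default.)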
+# # sec_level = "authNoPriv" +# ## Context Name. +# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "". +# ### Protocols "AES192", "AES192", "AES256", and "AES256C" require the underlying net-snmp tools +# ### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html) +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" +# +# ## Add fields and tables defining the variables you wish to collect. This +# ## example collects the system uptime and interface variables. Reference the +# ## full plugin documentation for configuration details. +# [[inputs.snmp.field]] +# oid = "RFC1213-MIB::sysUpTime.0" +# name = "uptime" +# +# [[inputs.snmp.field]] +# oid = "RFC1213-MIB::sysName.0" +# name = "source" +# is_tag = true +# +# [[inputs.snmp.table]] +# oid = "IF-MIB::ifTable" +# name = "interface" +# inherit_tags = ["source"] +# +# [[inputs.snmp.table.field]] +# oid = "IF-MIB::ifDescr" +# name = "ifDescr" +# is_tag = true + + +# ## DEPRECATED: The "snmp_legacy" plugin is deprecated in version 1.0.0 and will be removed in 1.30.0, use 'inputs.snmp' instead. +# [[inputs.snmp_legacy]] +# ## Use 'oids.txt' file to translate oids to names +# ## To generate 'oids.txt' you need to run: +# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt +# ## Or if you have an other MIB folder with custom MIBs +# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt +# snmptranslate_file = "/tmp/oids.txt" +# [[inputs.snmp.host]] +# address = "192.168.2.2:161" +# # SNMP community +# community = "public" # default public +# # SNMP version (1, 2 or 3) +# # Version 3 not supported yet +# version = 2 # default 2 +# # SNMP response timeout +# timeout = 2.0 # default 2.0 +# # SNMP request retries +# retries = 2 # default 2 +# # Which get/bulk do you want to collect for this host +# collect = ["mybulk", "sysservices", "sysdescr"] +# # Simple list of OIDs to get, in addition to "collect" +# get_oids = [] +# [[inputs.snmp.host]] +# address = "192.168.2.3:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# collect = ["mybulk"] +# get_oids = [ +# "ifNumber", +# ".1.3.6.1.2.1.1.3.0", +# ] +# [[inputs.snmp.get]] +# name = "ifnumber" +# oid = "ifNumber" +# [[inputs.snmp.get]] +# name = "interface_speed" +# oid = "ifSpeed" +# instance = "0" +# [[inputs.snmp.get]] +# name = "sysuptime" +# oid = ".1.3.6.1.2.1.1.3.0" +# unit = "second" +# [[inputs.snmp.bulk]] +# name = "mybulk" +# max_repetition = 127 +# oid = ".1.3.6.1.2.1.1" +# [[inputs.snmp.bulk]] +# name = "ifoutoctets" +# max_repetition = 127 +# oid = "ifOutOctets" +# [[inputs.snmp.host]] +# address = "192.168.2.13:161" +# #address = "127.0.0.1:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# #collect = ["mybulk", "sysservices", "sysdescr", "systype"] +# collect = ["sysuptime" ] +# [[inputs.snmp.host.table]] +# name = "iftable3" +# include_instances = ["enp5s0", "eth1"] +# # SNMP TABLEs +# # table without mapping neither subtables +# [[inputs.snmp.table]] +# name = "iftable1" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # table without mapping but with subtables +# [[inputs.snmp.table]] +# name = "iftable2" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# sub_tables = [".1.3.6.1.2.1.2.2.1.13"] +# # table with mapping but without subtables +# [[inputs.snmp.table]] +# name = "iftable3" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty. 
get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty, get all subtables +# # table with both mapping and subtables +# [[inputs.snmp.table]] +# name = "iftable4" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty get all subtables +# # sub_tables could be not "real subtables" +# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + + +# # Gather indicators from established connections, using iproute2's ss command. +# # This plugin ONLY supports non-Windows +# [[inputs.socketstat]] +# ## ss can display information about tcp, udp, raw, unix, packet, dccp and sctp sockets +# ## Specify here the types you want to gather +# protocols = [ "tcp", "udp" ] +# +# ## The default timeout of 1s for ss execution can be overridden here: +# # timeout = "1s" + + +# # Gather timeseries from Google Cloud Platform v3 monitoring API +# [[inputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## Include timeseries that start with the given metric type. +# metric_type_prefix_include = [ +# "compute.googleapis.com/", +# ] +# +# ## Exclude timeseries that start with the given metric type. +# # metric_type_prefix_exclude = [] +# +# ## Most metrics are updated no more than once per minute; it is recommended +# ## to override the agent level interval with a value of 1m or greater. +# interval = "1m" +# +# ## Maximum number of API calls to make per second. The quota for accounts +# ## varies, it can be viewed on the API dashboard: +# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits +# # rate_limit = 14 +# +# ## The delay and window options control the number of points selected on +# ## each gather. When set, metrics are gathered between: +# ## start: now() - delay - window +# ## end: now() - delay +# # +# ## Collection delay; if set too low metrics may not yet be available. +# # delay = "5m" +# # +# ## If unset, the window will start at 1m and be updated dynamically to span +# ## the time between calls (approximately the length of the plugin interval). +# # window = "1m" +# +# ## TTL for cached list of metric types. This is the maximum amount of time +# ## it may take to discover new metrics. +# # cache_ttl = "1h" +# +# ## If true, raw bucket counts are collected for distribution value types. +# ## For a more lightweight collection, you may wish to disable and use +# ## distribution_aggregation_aligners instead. +# # gather_raw_distribution_buckets = true +# +# ## Aggregate functions to be used for metrics whose value type is +# ## distribution. These aggregate values are recorded in in addition to raw +# ## bucket counts; if they are enabled. +# ## +# ## For a list of aligner strings see: +# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner +# # distribution_aggregation_aligners = [ +# # "ALIGN_PERCENTILE_99", +# # "ALIGN_PERCENTILE_95", +# # "ALIGN_PERCENTILE_50", +# # ] +# +# ## Filters can be added to reduce the number of time series matched. All +# ## functions are supported: starts_with, ends_with, has_substring, and +# ## one_of. Only the '=' operator is supported. 
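+# ## For example, 'starts_with("prod-")' or 'one_of("us-east1-b", "us-east1-c")'
+# ## are valid values for the label filters defined further below (the prefix
+# ## and zone names here are illustrative, not defaults).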
+# ##
+# ## The logical operators when combining filters are defined statically using
+# ## the following values:
+# ## filter ::= <resource_labels> {AND <metric_labels> AND <user_labels> AND <system_labels>}
+# ## resource_labels ::= <resource_label> {OR <resource_label>}
+# ## metric_labels ::= <metric_label> {OR <metric_label>}
+# ## user_labels ::= <user_label> {OR <user_label>}
+# ## system_labels ::= <system_label> {OR <system_label>}
+# ##
+# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
+# #
+# ## Resource labels refine the time series selection with the following expression:
+# ## resource.labels.<key> = <value>
+# # [[inputs.stackdriver.filter.resource_labels]]
+# # key = "instance_name"
+# # value = 'starts_with("localhost")'
+# #
+# ## Metric labels refine the time series selection with the following expression:
+# ## metric.labels.<key> = <value>
+# # [[inputs.stackdriver.filter.metric_labels]]
+# # key = "device_name"
+# # value = 'one_of("sda", "sdb")'
+# #
+# ## User labels refine the time series selection with the following expression:
+# ## metadata.user_labels."<key>" = <value>
+# # [[inputs.stackdriver.filter.user_labels]]
+# # key = "environment"
+# # value = 'one_of("prod", "staging")'
+# #
+# ## System labels refine the time series selection with the following expression:
+# ## metadata.system_labels."<key>" = <value>
+# # [[inputs.stackdriver.filter.system_labels]]
+# # key = "machine_type"
+# # value = 'starts_with("e2-")'
+
+
+# # Gathers information about processes running under supervisor using XML-RPC API
+# [[inputs.supervisor]]
+# ## Url of supervisor's XML-RPC endpoint; if basic auth is enabled in the supervisor http server,
+# ## then you have to add credentials to the url (ex. http://login:pass@localhost:9001/RPC2)
+# # url="http://localhost:9001/RPC2"
+# ## With the settings below you can manage gathering additional information about processes
+# ## If both of them are empty, then all additional information will be collected.
+# ## Currently supported additional metrics are: pid, rc
+# # metrics_include = []
+
+
+# # Get synproxy counter statistics from procfs
+# # This plugin ONLY supports Linux
+# [[inputs.synproxy]]
+# # no configuration
+
+
+# # Sysstat metrics collector
+# # This plugin ONLY supports Linux
+# [[inputs.sysstat]]
+# ## Path to the sadc command.
+# #
+# ## Common Defaults:
+# ## Debian/Ubuntu: /usr/lib/sysstat/sadc
+# ## Arch: /usr/lib/sa/sadc
+# ## RHEL/CentOS: /usr/lib64/sa/sadc
+# sadc_path = "/usr/lib/sa/sadc" # required
+#
+# ## Path to the sadf command, if it is not in PATH
+# # sadf_path = "/usr/bin/sadf"
+#
+# ## Activities is a list of activities, that are passed as argument to the
+# ## sadc collector utility (e.g: DISK, SNMP etc...)
+# ## The more activities that are added, the more data is collected.
+# # activities = ["DISK"]
+#
+# ## Group metrics to measurements.
+# ##
+# ## If group is false each metric will be prefixed with a description
+# ## and represents itself a measurement.
+# ##
+# ## If Group is true, corresponding metrics are grouped to a single measurement.
+# # group = true
+#
+# ## Options for the sadf command. The values on the left represent the sadf options and
+# ## the values on the right their description (which are used for grouping and prefixing metrics).
+# ##
+# ## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version.
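+# ## Each entry in the table below follows the pattern "<sadf option>" = "<description>",
+# ## and the description on the right is what the resulting metrics are grouped
+# ## and prefixed with, as with the commented "-H" = "hugepages" example further
+# ## down (an illustrative reading of the table, not an additional option).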
+# [inputs.sysstat.options] +# -C = "cpu" +# -B = "paging" +# -b = "io" +# -d = "disk" # requires DISK activity +# "-n ALL" = "network" +# "-P ALL" = "per_cpu" +# -q = "queue" +# -R = "mem" +# -r = "mem_util" +# -S = "swap_util" +# -u = "cpu_util" +# -v = "inode" +# -W = "swap" +# -w = "task" +# # -H = "hugepages" # only available for newer linux distributions +# # "-I ALL" = "interrupts" # requires INT activity +# +# ## Device tags can be used to add additional tags for devices. For example the configuration below +# ## adds a tag vg with value rootvg for all metrics with sda devices. +# # [[inputs.sysstat.device_tags.sda]] +# # vg = "rootvg" + + +# # Gather systemd units state +# [[inputs.systemd_units]] +# ## Set timeout for systemctl execution +# # timeout = "1s" +# # +# ## Filter for a specific unit type, default is "service", other possible +# ## values are "socket", "target", "device", "mount", "automount", "swap", +# ## "timer", "path", "slice" and "scope ": +# # unittype = "service" +# # +# ## Filter for a specific pattern, default is "" (i.e. all), other possible +# ## values are valid pattern for systemctl, e.g. "a*" for all units with +# ## names starting with "a" +# # pattern = "" +# ## pattern = "telegraf* influxdb*" +# ## pattern = "a*" + + +# # Tacacs plugin collects successful tacacs authentication response times. +# [[inputs.tacacs]] +# ## An array of Server IPs (or hostnames) and ports to gather from. If none specified, defaults to localhost. +# # servers = ["127.0.0.1:49"] +# +# ## Request source server IP, normally the server running telegraf. +# # request_ip = "127.0.0.1" +# +# ## Credentials for tacacs authentication. +# username = "myuser" +# password = "mypassword" +# secret = "mysecret" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" + + +# # Reads metrics from a Teamspeak 3 Server via ServerQuery +# [[inputs.teamspeak]] +# ## Server address for Teamspeak 3 ServerQuery +# # server = "127.0.0.1:10011" +# ## Username for ServerQuery +# username = "serverqueryuser" +# ## Password for ServerQuery +# password = "secret" +# ## Nickname of the ServerQuery client +# nickname = "telegraf" +# ## Array of virtual servers +# # virtual_servers = [1] + + +# # Read metrics about temperature +# [[inputs.temp]] +# # no configuration + + +# # Read Tengine's basic status information (ngx_http_reqstat_module) +# [[inputs.tengine]] +# ## An array of Tengine reqstat module URI to gather stats. +# urls = ["http://127.0.0.1/us"] +# +# ## HTTP response timeout (default: 5s) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather metrics from the Tomcat server status page. 
+# [[inputs.tomcat]] +# ## URL of the Tomcat server status +# # url = "http://127.0.0.1:8080/manager/status/all?XML=true" +# +# ## HTTP Basic Auth Credentials +# # username = "tomcat" +# # password = "s3cret" +# +# ## Request timeout +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Inserts sine and cosine waves for demonstration purposes +# [[inputs.trig]] +# ## Set the amplitude +# amplitude = 10.0 + + +# # Read Twemproxy stats data +# [[inputs.twemproxy]] +# ## Twemproxy stats address and port (no scheme) +# addr = "localhost:22222" +# ## Monitor pool name +# pools = ["redis_pool", "mc_pool"] + + +# # A plugin to collect stats from the Unbound DNS resolver +# [[inputs.unbound]] +# ## Address of server to connect to, read from unbound conf default, optionally ':port' +# ## Will lookup IP if given a hostname +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the unbound-control binary can be overridden with: +# # binary = "/usr/sbin/unbound-control" +# +# ## The default location of the unbound config file can be overridden with: +# # config_file = "/etc/unbound/unbound.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" +# +# ## When set to true, thread metrics are tagged with the thread id. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# thread_as_tag = false + + +# # Monitor UPSes connected via Network UPS Tools +# [[inputs.upsd]] +# ## A running NUT server to connect to. +# ## IPv6 addresses must be enclosed in brackets (e.g. "[::1]") +# # server = "127.0.0.1" +# # port = 3493 +# # username = "user" +# # password = "password" +# +# ## Force parsing numbers as floats +# ## It is highly recommended to enable this setting to parse numbers +# ## consistently as floats to avoid database conflicts where some numbers are +# ## parsed as integers and others as floats. +# # force_float = false + + +# # Read uWSGI metrics. +# [[inputs.uwsgi]] +# ## List with urls of uWSGI Stats servers. Url must match pattern: +# ## scheme://address[:port] +# ## +# ## For example: +# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] +# servers = ["tcp://127.0.0.1:1717"] +# +# ## General connection timeout +# # timeout = "5s" + + +# # A plugin to collect stats from Varnish HTTP Cache +# # This plugin ONLY supports non-Windows +# [[inputs.varnish]] +# ## If running as a restricted user you can prepend sudo for additional access: +# #use_sudo = false +# +# ## The default location of the varnishstat binary can be overridden with: +# binary = "/usr/bin/varnishstat" +# +# ## Additional custom arguments for the varnishstat command +# # binary_args = ["-f", "MAIN.*"] +# +# ## The default location of the varnishadm binary can be overridden with: +# adm_binary = "/usr/bin/varnishadm" +# +# ## Custom arguments for the varnishadm command +# # adm_binary_args = [""] +# +# ## Metric version defaults to metric_version=1, use metric_version=2 for removal of nonactive vcls +# ## Varnish 6.0.2 and newer is required for metric_version=2. 
+# metric_version = 1 +# +# ## Additional regexps to override builtin conversion of varnish metrics into telegraf metrics. +# ## Regexp group "_vcl" is used for extracting the VCL name. Metrics that contain nonactive VCL's are skipped. +# ## Regexp group "_field" overrides the field name. Other named regexp groups are used as tags. +# # regexps = ['^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val'] +# +# ## By default, telegraf gather stats for 3 metric points. +# ## Setting stats will override the defaults shown below. +# ## Glob matching can be used, ie, stats = ["MAIN.*"] +# ## stats may also be set to ["*"], which will collect all stats +# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] +# +# ## Optional name for the varnish instance (or working directory) to query +# ## Usually append after -n in varnish cli +# # instance_name = instanceName +# +# ## Timeout for varnishstat command +# # timeout = "1s" + + +# # Read metrics from the Vault API +# [[inputs.vault]] +# ## URL for the Vault agent +# # url = "http://127.0.0.1:8200" +# +# ## Use Vault token for authorization. +# ## Vault token configuration is mandatory. +# ## If both are empty or both are set, an error is thrown. +# # token_file = "/path/to/auth/token" +# ## OR +# token = "s.CDDrgg5zPv5ssI0Z2P4qxJj2" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + +# # Input plugin to counterPath Performance Counters on Windows operating systems +# # This plugin ONLY supports Windows +# [[inputs.win_perf_counters]] +# ## By default this plugin returns basic CPU and Disk statistics. See the +# ## README file for more examples. Uncomment examples below or write your own +# ## as you see fit. If the system being polled for data does not have the +# ## Object at startup of the Telegraf agent, it will not be gathered. +# +# ## Print All matching performance counters +# # PrintValid = false +# +# ## Whether request a timestamp along with the PerfCounter data or use current +# ## time +# # UsePerfCounterTime = true +# +# ## If UseWildcardsExpansion params is set to true, wildcards (partial +# ## wildcards in instance names and wildcards in counters names) in configured +# ## counter paths will be expanded and in case of localized Windows, counter +# ## paths will be also localized. It also returns instance indexes in instance +# ## names. If false, wildcards (not partial) in instance names will still be +# ## expanded, but instance indexes will not be returned in instance names. +# # UseWildcardsExpansion = false +# +# ## When running on a localized version of Windows and with +# ## UseWildcardsExpansion = true, Windows will localize object and counter +# ## names. When LocalizeWildcardsExpansion = false, use the names in +# ## object.Counters instead of the localized names. Only Instances can have +# ## wildcards in this case. ObjectName and Counters must not have wildcards +# ## when this setting is false. +# # LocalizeWildcardsExpansion = true +# +# ## Period after which counters will be reread from configuration and +# ## wildcards in counter paths expanded +# # CountersRefreshInterval="1m" +# +# ## Accepts a list of PDH error codes which are defined in pdh.go, if this +# ## error is encountered it will be ignored. For example, you can provide +# ## "PDH_NO_DATA" to ignore performance counters with no instances. 
By default +# ## no errors are ignored You can find the list here: +# ## https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go +# ## e.g. IgnoredErrors = ["PDH_NO_DATA"] +# # IgnoredErrors = [] +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# # [[inputs.win_perf_counters.object]] +# # Measurement = "" +# # ObjectName = "" +# # Instances = [""] +# # Counters = [] +# ## Additional Object Settings +# ## * IncludeTotal: set to true to include _Total instance when querying +# ## for all metrics via '*' +# ## * WarnOnMissing: print out when the performance counter is missing +# ## from object, counter or instance +# ## * UseRawValues: gather raw values instead of formatted. Raw values are +# ## stored in the field name with the "_Raw" suffix, e.g. +# ## "Disk_Read_Bytes_sec_Raw". +# # IncludeTotal = false +# # WarnOnMissing = false +# # UseRawValues = false +# +# ## Processor usage, alternative to native, reports on a per core. +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_cpu" +# # ObjectName = "Processor" +# # Instances = ["*"] +# # UseRawValues = true +# # Counters = [ +# # "% Idle Time", +# # "% Interrupt Time", +# # "% Privileged Time", +# # "% User Time", +# # "% Processor Time", +# # "% DPC Time", +# # ] +# +# ## Disk times and queues +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_disk" +# # ObjectName = "LogicalDisk" +# # Instances = ["*"] +# # Counters = [ +# # "% Idle Time", +# # "% Disk Time", +# # "% Disk Read Time", +# # "% Disk Write Time", +# # "% User Time", +# # "% Free Space", +# # "Current Disk Queue Length", +# # "Free Megabytes", +# # ] +# +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_diskio" +# # ObjectName = "PhysicalDisk" +# # Instances = ["*"] +# # Counters = [ +# # "Disk Read Bytes/sec", +# # "Disk Write Bytes/sec", +# # "Current Disk Queue Length", +# # "Disk Reads/sec", +# # "Disk Writes/sec", +# # "% Disk Time", +# # "% Disk Read Time", +# # "% Disk Write Time", +# # ] +# +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_net" +# # ObjectName = "Network Interface" +# # Instances = ["*"] +# # Counters = [ +# # "Bytes Received/sec", +# # "Bytes Sent/sec", +# # "Packets Received/sec", +# # "Packets Sent/sec", +# # "Packets Received Discarded", +# # "Packets Outbound Discarded", +# # "Packets Received Errors", +# # "Packets Outbound Errors", +# # ] +# +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_system" +# # ObjectName = "System" +# # Instances = ["------"] +# # Counters = [ +# # "Context Switches/sec", +# # "System Calls/sec", +# # "Processor Queue Length", +# # "System Up Time", +# # ] +# +# ## Example counterPath where the Instance portion must be removed to get +# ## data back, such as from the Memory object. +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_mem" +# # ObjectName = "Memory" +# ## Use 6 x - to remove the Instance bit from the counterPath. 
+# # Instances = ["------"] +# # Counters = [ +# # "Available Bytes", +# # "Cache Faults/sec", +# # "Demand Zero Faults/sec", +# # "Page Faults/sec", +# # "Pages/sec", +# # "Transition Faults/sec", +# # "Pool Nonpaged Bytes", +# # "Pool Paged Bytes", +# # "Standby Cache Reserve Bytes", +# # "Standby Cache Normal Priority Bytes", +# # "Standby Cache Core Bytes", +# # ] +# +# ## Example query where the Instance portion must be removed to get data back, +# ## such as from the Paging File object. +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_swap" +# # ObjectName = "Paging File" +# # Instances = ["_Total"] +# # Counters = [ +# # "% Usage", +# # ] + + +# # Input plugin to report Windows services info. +# # This plugin ONLY supports Windows +# [[inputs.win_services]] +# ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. Case sensitive. +# service_names = [ +# "LanmanServer", +# "TermService", +# "Win*", +# ] +# excluded_service_names = ['WinRM'] # optional, list of service names to exclude + + +# # Input plugin to query Windows Management Instrumentation +# # This plugin ONLY supports Windows +# [[inputs.win_wmi]] +# [[inputs.win_wmi.query]] +# # a string representing the WMI namespace to be queried +# namespace = "root\\cimv2" +# # a string representing the WMI class to be queried +# class_name = "Win32_Volume" +# # an array of strings representing the properties of the WMI class to be queried +# properties = ["Name", "Capacity", "FreeSpace"] +# # a string specifying a WHERE clause to use as a filter for the WQL +# filter = 'NOT Name LIKE "\\\\?\\%"' +# # WMI class properties which should be considered tags instead of fields +# tag_properties = ["Name"] + + +# # Collect Wireguard server interface and peer statistics +# [[inputs.wireguard]] +# ## Optional list of Wireguard device/interface names to query. +# ## If omitted, all Wireguard interfaces are queried. +# # devices = ["wg0"] + + +# # Monitor wifi signal strength and quality +# # This plugin ONLY supports Linux +# [[inputs.wireless]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" + + +# # Reads metrics from a SSL certificate +# [[inputs.x509_cert]] +# ## List certificate sources, support wildcard expands for files +# ## Prefix your entry with 'file://' if you intend to use relative paths +# sources = ["tcp://example.org:443", "https://influxdata.com:443", +# "smtp://mail.localhost:25", "udp://127.0.0.1:4433", +# "/etc/ssl/certs/ssl-cert-snakeoil.pem", +# "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] +# +# ## Timeout for SSL connection +# # timeout = "5s" +# +# ## Pass a different name into the TLS request (Server Name Indication). +# ## This is synonymous with tls_server_name, and only one of the two +# ## options may be specified at one time. +# ## example: server_name = "myhost.example.org" +# # server_name = "myhost.example.org" +# +# ## Only output the leaf certificates and omit the root ones. 
+# # exclude_root_certs = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# # tls_server_name = "myhost.example.org" +# +# ## Set the proxy URL +# # use_proxy = true +# # proxy_url = "http://localhost:8888" + + +# # Gathers Metrics From a Dell EMC XtremIO Storage Array's V3 API +# [[inputs.xtremio]] +# ## XtremIO User Interface Endpoint +# url = "https://xtremio.example.com/" # required +# +# ## Credentials +# username = "user1" +# password = "pass123" +# +# ## Metrics to collect from the XtremIO +# # collectors = ["bbus","clusters","ssds","volumes","xms"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets +# # This plugin ONLY supports Linux & FreeBSD +# [[inputs.zfs]] +# ## ZFS kstat path. Ignored on FreeBSD +# ## If not specified, then default is: +# # kstatPath = "/proc/spl/kstat/zfs" +# +# ## By default, telegraf gather all zfs stats +# ## Override the stats list using the kstatMetrics array: +# ## For FreeBSD, the default is: +# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# ## For Linux, the default is: +# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# +# ## By default, don't gather zpool stats +# # poolMetrics = false +# +# ## By default, don't gather dataset stats +# # datasetMetrics = false + + +# # Reads 'mntr' stats from one or many zookeeper servers +# [[inputs.zookeeper]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie localhost:2181, 10.0.0.1:2181, etc. +# +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 2181 is used +# servers = [":2181"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" +# +# ## Float Parsing - the initial implementation forced any value unable to be +# ## parsed as an int to be a string. Setting this to "float" will attempt to +# ## parse float values as floats and not strings. This would break existing +# ## metrics and may cause issues if a value switches between a float and int. 
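+# ## (Illustration: with the default "string", a sampled value such as 0.95 is
+# ## kept as a string field; setting "float" would emit it as a float field,
+# ## changing the field type for any existing series.)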
+# # parse_floats = "string" +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true + + +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### + + +# # Pull Metric Statistics from Aliyun CMS +# [[inputs.aliyuncms]] +# ## Aliyun Credentials +# ## Credentials are loaded in the following order +# ## 1) Ram RoleArn credential +# ## 2) AccessKey STS token credential +# ## 3) AccessKey credential +# ## 4) Ecs Ram Role credential +# ## 5) RSA keypair credential +# ## 6) Environment variables credential +# ## 7) Instance metadata credential +# +# # access_key_id = "" +# # access_key_secret = "" +# # access_key_sts_token = "" +# # role_arn = "" +# # role_session_name = "" +# # private_key = "" +# # public_key_id = "" +# # role_name = "" +# +# ## Specify ali cloud regions to be queried for metric and object discovery +# ## If not set, all supported regions (see below) would be covered, it can +# ## provide a significant load on API, so the recommendation here is to +# ## limit the list as much as possible. +# ## Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm +# ## Default supported regions are: +# ## cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou, +# ## cn-shanghai, cn-shenzhen, cn-heyuan,cn-chengdu,cn-hongkong, +# ## ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, +# ## ap-south-1,ap-northeast-1, us-west-1,us-east-1,eu-central-1, +# ## eu-west-1,me-east-1 +# ## +# ## From discovery perspective it set the scope for object discovery, +# ## the discovered info can be used to enrich the metrics with objects +# ## attributes/tags. Discovery is not supported for all projects. +# ## Currently, discovery supported for the following projects: +# ## - acs_ecs_dashboard +# ## - acs_rds_dashboard +# ## - acs_slb_dashboard +# ## - acs_vpc_eip +# regions = ["cn-hongkong"] +# +# ## Requested AliyunCMS aggregation Period (required) +# ## The period must be multiples of 60s and the minimum for AliyunCMS metrics +# ## is 1 minute (60s). However not all metrics are made available to the +# ## one minute period. Some are collected at 3 minute, 5 minute, or larger +# ## intervals. +# ## See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv +# ## Note that if a period is configured that is smaller than the minimum for +# ## a particular metric, that metric will not be returned by Aliyun's +# ## OpenAPI and will not be collected by Telegraf. +# period = "5m" +# +# ## Collection Delay (required) +# ## The delay must account for metrics availability via AliyunCMS API. 
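+# ## (With the sample values in this section - period = "5m" and delay = "1m" -
+# ## each gather requests datapoints that are at least one minute old,
+# ## aggregated over five-minute periods; an illustration of how the two
+# ## settings interact, not additional defaults.)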
+# delay = "1m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' +# ## to avoid gaps or overlap in pulled data +# interval = "5m" +# +# ## Metric Statistic Project (required) +# project = "acs_slb_dashboard" +# +# ## Maximum requests per second, default value is 200 +# ratelimit = 200 +# +# ## How often the discovery API call executed (default 1m) +# #discovery_interval = "1m" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Metrics to Pull +# ## At least one metrics definition required +# [[inputs.aliyuncms.metrics]] +# ## Metrics names to be requested, +# ## Description can be found here (per project): +# ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# names = ["InstanceActiveConnection", "InstanceNewConnection"] +# +# ## Dimension filters for Metric (optional) +# ## This allows to get additional metric dimension. If dimension is not +# ## specified it can be returned or the data can be aggregated - it depends +# ## on particular metric, you can find details here: +# ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# ## +# ## Note, that by default dimension filter includes the list of discovered +# ## objects in scope (if discovery is enabled). Values specified here would +# ## be added into the list of discovered objects. You can specify either +# ## single dimension: +# # dimensions = '{"instanceId": "p-example"}' +# +# ## Or you can specify several dimensions at once: +# # dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' +# +# ## Tag Query Path +# ## The following tags added by default: +# ## * regionId (if discovery enabled) +# ## * userId +# ## * instanceId +# ## Enrichment tags, can be added from discovery (if supported) +# ## Notation is +# ## : +# ## To figure out which fields are available, consult the +# ## Describe API per project. For example, for SLB see: +# ## https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers¶ms={}&tab=MOCK&lang=GO +# # tag_query_path = [ +# # "address:Address", +# # "name:LoadBalancerName", +# # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" +# # ] +# +# ## Allow metrics without discovery data, if discovery is enabled. +# ## If set to true, then metric without discovery data would be emitted, otherwise dropped. +# ## This cane be of help, in case debugging dimension filters, or partial coverage of +# ## discovery scope vs monitoring scope +# # allow_dps_without_discovery = false + + +# # AMQP consumer plugin +# [[inputs.amqp_consumer]] +# ## Brokers to consume from. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. +# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Name of the exchange to declare. If unset, no exchange will be declared. +# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". 
+# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## AMQP queue name. +# queue = "telegraf" +# +# ## AMQP queue durability can be "transient" or "durable". +# queue_durability = "durable" +# +# ## If true, queue will be passively declared. +# # queue_passive = false +# +# ## Additional arguments when consuming from Queue +# # queue_consume_arguments = { } +# # queue_consume_arguments = {"x-stream-offset" = "first"} +# +# ## A binding between the exchange and queue using this binding key is +# ## created. If unset, no binding is created. +# binding_key = "#" +# +# ## Maximum number of messages server should give to the worker. +# # prefetch_count = 50 +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Content encoding for message payloads, can be set to +# ## "gzip", "identity" or "auto" +# ## - Use "gzip" to decode gzip +# ## - Use "identity" to apply no encoding +# ## - Use "auto" determine the encoding using the ContentEncoding header +# # content_encoding = "identity" +# +# ## Maximum size of decoded message. +# ## Acceptable units are B, KiB, KB, MiB, MB... +# ## Without quotes and units, interpreted as size in bytes. +# # max_decompression_size = "500MB" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# ## DEPRECATED: The "cassandra" plugin is deprecated in version 1.7.0 and will be removed in 1.30.0, use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead. +# # Read Cassandra metrics through Jolokia +# [[inputs.cassandra]] +# context = "/jolokia/read" +# ## List of cassandra servers exposing jolokia read service +# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] +# ## List of metrics collected on above servers +# ## Each metric consists of a jmx path. +# ## This will collect all heap memory usage metrics from the jvm and +# ## ReadLatency metrics for all keyspaces and tables. +# ## "type=Table" in the query works with Cassandra3.0. 
Older versions might +# ## need to use "type=ColumnFamily" +# metrics = [ +# "/java.lang:type=Memory/HeapMemoryUsage", +# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" +# ] + + +# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms +# [[inputs.cisco_telemetry_mdt]] +# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when +# ## using the grpc transport. +# transport = "grpc" +# +# ## Address and port to host telemetry listener +# service_address = ":57000" +# +# ## Grpc Maximum Message Size, default is 4MB, increase the size. This is +# ## stored as a uint32, and limited to 4294967295. +# max_msg_size = 4000000 +# +# ## Enable TLS; grpc transport only. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Enable TLS client authentication and define allowed CA certificates; grpc +# ## transport only. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags +# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] +# +# ## Include the delete field in every telemetry message. +# # include_delete_field = false +# +# ## Specify custom name for incoming MDT source field. +# # source_field_name = "mdt_source" +# +# ## Define aliases to map telemetry encoding paths to simple measurement names +# [inputs.cisco_telemetry_mdt.aliases] +# ifstats = "ietf-interfaces:interfaces-state/interface/statistics" +# ## Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. +# [inputs.cisco_telemetry_mdt.dmes] +# # Global Property Xformation. +# # prop1 = "uint64 to int" +# # prop2 = "uint64 to string" +# # prop3 = "string to uint64" +# # prop4 = "string to int64" +# # prop5 = "string to float64" +# # auto-prop-xfrom = "auto-float-xfrom" #Xform any property which is string, and has float number to type float64 +# # Per Path property xformation, Name is telemetry configuration under sensor-group, path configuration "WORD Distinguished Name" +# # Per Path configuration is better as it avoid property collision issue of types. +# # dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}' +# # dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}' +# # dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' +# +# ## Additional GRPC connection settings. +# [inputs.cisco_telemetry_mdt.grpc_enforcement_policy] +# ## GRPC permit keepalives without calls, set to true if your clients are +# ## sending pings without calls in-flight. This can sometimes happen on IOS-XE +# ## devices where the GRPC connection is left open but subscriptions have been +# ## removed, and adding subsequent subscriptions does not keep a stable session. +# # permit_keepalive_without_calls = false +# +# ## GRPC minimum timeout between successive pings, decreasing this value may +# ## help if this plugin is closing connections with ENHANCE_YOUR_CALM (too_many_pings). 
+# # keepalive_minimum_time = "5m"
+
+
+# # Read metrics from one or many ClickHouse servers
+# [[inputs.clickhouse]]
+# ## Username for authorization on ClickHouse server
+# username = "default"
+#
+# ## Password for authorization on ClickHouse server
+# # password = ""
+#
+# ## HTTP(s) timeout while getting metrics values
+# ## The timeout includes connection time, any redirects, and reading the
+# ## response body.
+# # timeout = "5s"
+#
+# ## List of servers for metrics scraping
+# ## metrics scrape via HTTP(s) clickhouse interface
+# ## https://clickhouse.tech/docs/en/interfaces/http/
+# servers = ["http://127.0.0.1:8123"]
+#
+# ## If "auto_discovery" is "true" the plugin tries to connect to all servers
+# ## available in the cluster, using the same "user:password" described in the
+# ## "username" and "password" parameters, and gets the server hostname list from
+# ## the "system.clusters" table. See
+# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+# # auto_discovery = true
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter is present, a "WHERE cluster IN (...)" filter will be applied
+# ## please use only full cluster names here; regexp and glob filters are not
+# ## allowed. Cluster names come from "/etc/clickhouse-server/config.d/remote.xml",
+# ## for example:
+# ## <remote_servers>
+# ##   <my-own-cluster>
+# ##     <shard>
+# ##       <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
+# ##       <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
+# ##     </shard>
+# ##     <shard>
+# ##       <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
+# ##       <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
+# ##     </shard>
+# ##   </my-own-cluster>
+# ## </remote_servers>
+# ##
+# ## example: cluster_include = ["my-own-cluster"]
+# # cluster_include = []
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is
+# ## "true"; when this filter is present, a "WHERE cluster NOT IN (...)"
+# ## filter will be applied
+# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+# # cluster_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from Google PubSub
+# [[inputs.cloud_pubsub]]
+# ## Required. Name of Google Cloud Platform (GCP) Project that owns
+# ## the given PubSub subscription.
+# project = "my-project"
+#
+# ## Required. Name of PubSub subscription to ingest metrics from.
+# subscription = "my-subscription"
+#
+# ## Required. Data format to consume.
+# ## Each data format has its own unique set of configuration options.
+# ## Read more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
+# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
+# ## Application Default Credentials, which is preferred.
+# # credentials_file = "path/to/my/creds.json"
+#
+# ## Optional. Number of seconds to wait before attempting to restart the
+# ## PubSub subscription receiver after an unexpected error.
+# ## If the streaming pull for a PubSub Subscription fails (receiver),
+# ## the agent attempts to restart receiving messages after this many seconds.
+# # retry_delay_seconds = 5
+#
+# ## Optional. 
Maximum byte length of a message to consume. +# ## Larger messages are dropped with an error. If less than 0 or unspecified, +# ## treated as no limit. +# # max_message_len = 1000000 +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## The following are optional Subscription ReceiveSettings in PubSub. +# ## Read more about these values: +# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings +# +# ## Optional. Maximum number of seconds for which a PubSub subscription +# ## should auto-extend the PubSub ACK deadline for each message. If less than +# ## 0, auto-extension is disabled. +# # max_extension = 0 +# +# ## Optional. Maximum number of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_messages = 0 +# +# ## Optional. Maximum size in bytes of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_bytes = 0 +# +# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn +# ## to pull messages from PubSub concurrently. This limit applies to each +# ## subscription separately and is treated as the PubSub default if less than +# ## 1. Note this setting does not limit the number of messages that can be +# ## processed concurrently (use "max_outstanding_messages" instead). +# # max_receiver_go_routines = 0 +# +# ## Optional. If true, Telegraf will attempt to base64 decode the +# ## PubSub message data before parsing. Many GCP services that +# ## output JSON to Google PubSub base64-encode the JSON payload. +# # base64_data = false +# +# ## Content encoding for message payloads, can be set to "gzip" or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## If content encoding is not "identity", sets the maximum allowed size, +# ## in bytes, for a message payload when it's decompressed. Can be increased +# ## for larger payloads or reduced to protect against decompression bombs. +# ## Acceptable units are B, KiB, KB, MiB, MB... +# # max_decompression_size = "500MB" + + +# # Google Cloud Pub/Sub Push HTTP listener +# [[inputs.cloud_pubsub_push]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Application secret to verify messages originate from Cloud Pub/Sub +# # token = "" +# +# ## Path to listen to. +# # path = "/" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response. This should be +# ## set to a value large enough that you can send at least 'metric_batch_size' +# ## number of messages within the duration. 
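+# ## For example (illustrative values only): with metric_batch_size = 1000 and a
+# ## slow output, raising this to something like "30s" gives more headroom than
+# ## the "10s" shown below.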
+# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Whether to add the pubsub metadata, such as message attributes and +# ## subscription as a tag. +# # add_meta = false +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # AWS Metric Streams listener +# [[inputs.cloudwatch_metric_streams]] +# ## Address and port to host HTTP listener on +# service_address = ":443" +# +# ## Paths to listen to. +# # paths = ["/telegraf"] +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Optional access key for Firehose security. +# # access_key = "test-key" +# +# ## An optional flag to keep Metric Streams metrics compatible with +# ## CloudWatch's API naming +# # api_compatability = false +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # A ctrlX Data Layer server sent event input plugin +# [[inputs.ctrlx_datalayer]] +# ## Hostname or IP address of the ctrlX CORE Data Layer server +# ## example: server = "localhost" # Telegraf is running directly on the device +# ## server = "192.168.1.1" # Connect to ctrlX CORE remote via IP +# ## server = "host.example.com" # Connect to ctrlX CORE remote via hostname +# ## server = "10.0.2.2:8443" # Connect to ctrlX CORE Virtual from development environment +# server = "localhost" +# +# ## Authentication credentials +# username = "boschrexroth" +# password = "boschrexroth" +# +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Timeout for HTTP requests. (default: "10s") +# # timeout = "10s" +# +# +# ## Create a ctrlX Data Layer subscription. +# ## It is possible to define multiple subscriptions per host. Each subscription can have its own +# ## sampling properties and a list of nodes to subscribe to. 
+# ## All subscriptions share the same credentials.
+# [[inputs.ctrlx_datalayer.subscription]]
+# ## The name of the measurement. (default: "ctrlx")
+# measurement = "memory"
+#
+# ## Configure the ctrlX Data Layer nodes which should be subscribed.
+# ## address - node address in ctrlX Data Layer (mandatory)
+# ## name - field name to use in the output (optional, default: base name of address)
+# ## tags - extra node tags to be added to the output metric (optional)
+# ## Note:
+# ## Use either the inline notation or the bracketed notation, not both.
+# ## The tags property is only supported in bracketed notation due to toml parser restrictions
+# ## Examples:
+# ## Inline notation
+# nodes=[
+# {name="available", address="framework/metrics/system/memavailable-mb"},
+# {name="used", address="framework/metrics/system/memused-mb"},
+# ]
+# ## Bracketed notation
+# # [[inputs.ctrlx_datalayer.subscription.nodes]]
+# # name ="available"
+# # address="framework/metrics/system/memavailable-mb"
+# # ## Define extra tags related to node to be added to the output metric (optional)
+# # [inputs.ctrlx_datalayer.subscription.nodes.tags]
+# # node_tag1="node_tag1"
+# # node_tag2="node_tag2"
+# # [[inputs.ctrlx_datalayer.subscription.nodes]]
+# # name ="used"
+# # address="framework/metrics/system/memused-mb"
+#
+# ## The switch "output_json_string" enables output of the measurement as JSON.
+# ## That way it can be used in a subsequent processor plugin, e.g. "Starlark Processor Plugin".
+# # output_json_string = false
+#
+# ## Define extra tags related to the subscription to be added to the output metric (optional)
+# # [inputs.ctrlx_datalayer.subscription.tags]
+# # subscription_tag1 = "subscription_tag1"
+# # subscription_tag2 = "subscription_tag2"
+#
+# ## The interval in which messages shall be sent by the ctrlX Data Layer to this plugin. (default: 1s)
+# ## Higher values reduce network load by queuing samples on the server side and sending them as a single TCP packet.
+# # publish_interval = "1s"
+#
+# ## The interval at which a "keepalive" message is sent if no data change occurs. (default: 60s)
+# ## Only used internally to detect broken network connections.
+# # keep_alive_interval = "60s"
+#
+# ## The interval at which an "error" message is sent if an error was received from a node. (default: 10s)
+# ## Higher values reduce load on the output target and network in case of errors by limiting the frequency of error messages.
+# # error_interval = "10s"
+#
+# ## The interval that defines the fastest rate at which the node values should be sampled and values captured. (default: 1s)
+# ## The sampling frequency should be adjusted to the dynamics of the signal to be sampled.
+# ## A higher sampling frequency increases load on the ctrlX Data Layer.
+# ## The sampling frequency can be higher than the publish interval. Captured samples are put in a queue and sent at the publish interval.
+# ## Note: The minimum sampling interval can be overruled by a global setting in the ctrlX Data Layer configuration ('datalayer/subscriptions/settings').
+# # sampling_interval = "1s"
+#
+# ## The requested size of the node value queue. (default: 10)
+# ## Relevant if more values are captured than can be sent.
+# # queue_size = 10
+#
+# ## The behaviour of the queue if it is full. (default: "DiscardOldest")
+# ## Possible values:
+# ## - "DiscardOldest"
+# ## The oldest value gets deleted from the queue when it is full.
+# ## - "DiscardNewest"
+# ## The newest value gets deleted from the queue when it is full.
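+# ## Worked example (illustrative): with queue_size = 10 and "DiscardOldest", a
+# ## burst of 15 samples keeps only the 10 newest values; with "DiscardNewest"
+# ## the 10 oldest are kept instead.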
+# # queue_behaviour = "DiscardOldest"
+#
+# ## The filter that determines when a new value will be sampled. (default: 0.0)
+# ## Calculation rule: If (abs(lastCapturedValue - newValue) > dead_band_value) capture(newValue).
+# # dead_band_value = 0.0
+#
+# ## The conditions on which a sample should be captured and thus will be sent as a message. (default: "StatusValue")
+# ## Possible values:
+# ## - "Status"
+# ## Capture the value only when the state of the node changes from or to an error state. Value changes are ignored.
+# ## - "StatusValue"
+# ## Capture when the value changes or the node changes from or to an error state.
+# ## See also 'dead_band_value' for what is considered a value change.
+# ## - "StatusValueTimestamp"
+# ## Capture even if the value is the same, but the timestamp of the value is newer.
+# ## Note: This might lead to high load on the network because every sample will be sent as a message
+# ## even if the value of the node did not change.
+# # value_change = "StatusValue"
+#
+
+
+# # Ingests files in a directory and then moves them to a target directory.
+# [[inputs.directory_monitor]]
+# ## The directory to monitor and read files from (including sub-directories if "recursive" is true).
+# directory = ""
+# #
+# ## The directory to move finished files to (maintaining directory hierarchy from source).
+# finished_directory = ""
+# #
+# ## Setting recursive to true will make the plugin recursively walk the directory and process all sub-directories.
+# # recursive = false
+# #
+# ## The directory to move files to upon file error.
+# ## If not provided, erroring files will stay in the monitored directory.
+# # error_directory = ""
+# #
+# ## The amount of time a file is allowed to sit in the directory before it is picked up.
+# ## This time can generally be low, but if a very large file is written to the directory and the copy is potentially slow,
+# ## set this higher so that the plugin will wait until the file is fully copied to the directory.
+# # directory_duration_threshold = "50ms"
+# #
+# ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested.
+# # files_to_monitor = ["^.*\.csv"]
+# #
+# ## A list of files to ignore, if necessary. Supports regex.
+# # files_to_ignore = [".DS_Store"]
+# #
+# ## Maximum lines of the file to process that have not yet been written by the
+# ## output. For best throughput set to the size of the output's metric_buffer_limit.
+# ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics.
+# # max_buffered_metrics = 10000
+# #
+# ## The maximum number of file paths to queue up for processing at once, before waiting until files are processed to find more files.
+# ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary.
+# # file_queue_size = 100000
+# #
+# ## Name a tag containing the name of the file the data was parsed from. Leave empty
+# ## to disable. Be cautious when file name variation is high, as this can increase cardinality
+# ## significantly. Read more about cardinality here:
+# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality
+# # file_tag = ""
+# #
+# ## Specify if the file can be read completely at once or if it needs to be read line by line (default).
+# ## Possible values: "line-by-line", "at-once"
+# # parse_method = "line-by-line"
+# #
+# ## The data format to be read from the files. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read logging output from the Docker engine +# [[inputs.docker_log]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# # endpoint = "unix:///var/run/docker.sock" +# +# ## When true, container logs are read from the beginning; otherwise reading +# ## begins at the end of the log. If state-persistence is enabled for Telegraf, +# ## the reading continues at the last previously processed timestamp. +# # from_beginning = false +# +# ## Timeout for Docker API calls. +# # timeout = "5s" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# # docker_label_include = [] +# # docker_label_exclude = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Azure Event Hubs service input plugin +# [[inputs.eventhub_consumer]] +# ## The default behavior is to create a new Event Hub client from environment variables. +# ## This requires one of the following sets of environment variables to be set: +# ## +# ## 1) Expected Environment Variables: +# ## - "EVENTHUB_CONNECTION_STRING" +# ## +# ## 2) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "EVENTHUB_KEY_NAME" +# ## - "EVENTHUB_KEY_VALUE" +# +# ## 3) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "AZURE_TENANT_ID" +# ## - "AZURE_CLIENT_ID" +# ## - "AZURE_CLIENT_SECRET" +# +# ## Uncommenting the option below will create an Event Hub client based solely on the connection string. +# ## This can either be the associated environment variable or hard coded directly. +# ## If this option is uncommented, environment variables will be ignored. +# ## Connection string should contain EventHubName (EntityPath) +# # connection_string = "" +# +# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister +# # persistence_dir = "" +# +# ## Change the default consumer group +# # consumer_group = "" +# +# ## By default the event hub receives all messages present on the broker, alternative modes can be set below. +# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). +# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). 
+# # from_timestamp = +# # latest = true +# +# ## Set a custom prefetch count for the receiver(s) +# # prefetch_count = 1000 +# +# ## Add an epoch to the receiver(s) +# # epoch = 0 +# +# ## Change to set a custom user agent, "telegraf" is used by default +# # user_agent = "telegraf" +# +# ## To consume from a specific partition, set the partition_ids option. +# ## An empty array will result in receiving from all partitions. +# # partition_ids = ["0","1"] +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Set either option below to true to use a system property as timestamp. +# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. +# ## It is recommended to use this setting when the data itself has no timestamp. +# # enqueued_time_as_ts = true +# # iot_hub_enqueued_time_as_ts = true +# +# ## Tags or fields to create from keys present in the application property bag. +# ## These could for example be set by message enrichments in Azure IoT Hub. +# # application_property_tags = [] +# # application_property_fields = [] +# +# ## Tag or field name to use for metadata +# ## By default all metadata is disabled +# # sequence_number_field = "SequenceNumber" +# # enqueued_time_field = "EnqueuedTime" +# # offset_field = "Offset" +# # partition_id_tag = "PartitionID" +# # partition_key_tag = "PartitionKey" +# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" +# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" +# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" +# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" +# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Run executable as long-running input plugin +# [[inputs.execd]] +# ## One program to run as daemon. +# ## NOTE: process and each argument should each be their own string +# command = ["telegraf-smartctl", "-d", "/dev/sda"] +# +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# +# ## Define how the process is signaled on each collection interval. +# ## Valid values are: +# ## "none" : Do not signal anything. (Recommended for service inputs) +# ## The process must output metrics by itself. +# ## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs) +# ## "SIGHUP" : Send a HUP signal. Not available on Windows. (not recommended) +# ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. +# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. 
+# signal = "none" +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Buffer size used to read from the command output stream +# ## Optional parameter. Default is 64 Kib, minimum is 16 bytes +# # buffer_size = "64Kib" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # gNMI telemetry input plugin +# [[inputs.gnmi]] +# ## Address and port of the gNMI GRPC server +# addresses = ["10.49.234.114:57777"] +# +# ## define credentials +# username = "cisco" +# password = "cisco" +# +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") +# # encoding = "proto" +# +# ## redial in case of failures after +# # redial = "10s" +# +# ## gRPC Maximum Message Size +# # max_msg_size = "4MB" +# +# ## Enable to get the canonical path as field-name +# # canonical_field_names = false +# +# ## Remove leading slashes and dots in field-name +# # trim_field_names = false +# +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# ## Minimal TLS version to accept by the client +# # tls_min_version = "TLS12" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true +# +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## gNMI subscription prefix (optional, can usually be left empty) +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# # origin = "" +# # prefix = "" +# # target = "" +# +# ## Vendor specific options +# ## This defines what vendor specific options to load. +# ## * Juniper Header Extension (juniper_header): some sensors are directly managed by +# ## Linecard, which adds the Juniper GNMI Header Extension. Enabling this +# ## allows the decoding of the Extension header if present. Currently this knob +# ## adds component, component_id & sub_component_id as additionnal tags +# # vendor_specific = [] +# +# ## Define additional aliases to map encoding paths to measurement names +# # [inputs.gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" +# +# [[inputs.gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" +# +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructure inside it that should be subscribed +# ## to (similar to an XPath). YANG models can be found e.g. here: +# ## https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# ## Subscription mode ("target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" +# +# ## Tag subscriptions are applied as tags to other subscriptions. 
+# # [[inputs.gnmi.tag_subscription]] +# # ## When applying this value as a tag to other metrics, use this tag name +# # name = "descr" +# # +# # ## All other subscription fields are as normal +# # origin = "openconfig-interfaces" +# # path = "/interfaces/interface/state" +# # subscription_mode = "on_change" +# # +# # ## Match strategy to use for the tag. +# # ## Tags are only applied for metrics of the same address. The following +# # ## settings are valid: +# # ## unconditional -- always match +# # ## name -- match by the "name" key +# # ## This resembles the previsou 'tag-only' behavior. +# # ## elements -- match by the keys in the path filtered by the path +# # ## parts specified `elements` below +# # ## By default, 'elements' is used if the 'elements' option is provided, +# # ## otherwise match by 'name'. +# # # match = "" +# # +# # ## For the 'elements' match strategy, at least one path-element name must +# # ## be supplied containing at least one key to match on. Multiple path +# # ## elements can be specified in any order. All given keys must be equal +# # ## for a match. +# # # elements = ["description", "interface"] + + +# ## DEPRECATED: The "http_listener" plugin is deprecated in version 1.9.0, has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead. +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.influxdb_listener]] +# ## Address and port to host HTTP listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = 0 +# +# ## Maximum line size allowed to be sent in bytes. +# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored +# # max_line_size = 0 +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional tag name used to store the database name. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# ## If you have a tag that is the same as the one specified below, and supply a database, +# ## the tag will be overwritten with the database supplied. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# +# ## Optional username and password to accept for HTTP basic authentication +# ## or authentication token. +# ## You probably want to make sure you have TLS configured above for this. +# ## Use these options for the authentication token in the form +# ## Authentication: Token : +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Optional JWT token authentication for HTTP requests +# ## Please see the documentation at +# ## https://docs.influxdata.com/influxdb/v1.8/administration/authentication_and_authorization/#authenticate-using-jwt-tokens +# ## for further details. 
+# ## Please note: Token authentication and basic authentication cannot be used +# ## at the same time. +# # token_shared_secret = "" +# # token_username = "" +# +# ## Influx line protocol parser +# ## 'internal' is the default. 'upstream' is a newer parser that is faster +# ## and more memory efficient. +# # parser_type = "internal" + + +# # Generic HTTP write listener +# [[inputs.http_listener_v2]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Paths to listen to. +# # paths = ["/telegraf"] +# +# ## Save path as http_listener_v2_path tag if set to true +# # path_tag = false +# +# ## HTTP methods to accept. +# # methods = ["POST", "PUT"] +# +# ## Optional HTTP headers +# ## These headers are applied to the server that is listening for HTTP +# ## requests and included in responses. +# # http_headers = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Part of the request to consume. Available options are "body" and +# ## "query". +# # data_source = "body" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Minimal TLS version accepted by the server +# # tls_min_version = "TLS12" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Optional setting to map http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.influxdb_listener]] +# ## Address and port to host HTTP listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = 0 +# +# ## Maximum line size allowed to be sent in bytes. +# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored +# # max_line_size = 0 +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional tag name used to store the database name. +# ## If the write has a database in the query string then it will be kept in this tag name. 
+# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# ## If you have a tag that is the same as the one specified below, and supply a database, +# ## the tag will be overwritten with the database supplied. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# +# ## Optional username and password to accept for HTTP basic authentication +# ## or authentication token. +# ## You probably want to make sure you have TLS configured above for this. +# ## Use these options for the authentication token in the form +# ## Authentication: Token : +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Optional JWT token authentication for HTTP requests +# ## Please see the documentation at +# ## https://docs.influxdata.com/influxdb/v1.8/administration/authentication_and_authorization/#authenticate-using-jwt-tokens +# ## for further details. +# ## Please note: Token authentication and basic authentication cannot be used +# ## at the same time. +# # token_shared_secret = "" +# # token_username = "" +# +# ## Influx line protocol parser +# ## 'internal' is the default. 'upstream' is a newer parser that is faster +# ## and more memory efficient. +# # parser_type = "internal" + + +# # Accept metrics over InfluxDB 2.x HTTP API +# [[inputs.influxdb_v2_listener]] +# ## Address and port to host InfluxDB listener on +# ## (Double check the port. Could be 9999 if using OSS Beta) +# service_address = ":8086" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# # max_body_size = "32MiB" +# +# ## Optional tag to determine the bucket. +# ## If the write has a bucket in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # bucket_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional token to accept for HTTP authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # token = "some-long-shared-secret-token" +# +# ## Influx line protocol parser +# ## 'internal' is the default. 'upstream' is a newer parser that is faster +# ## and more memory efficient. +# # parser_type = "internal" + + +# # Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem +# # This plugin ONLY supports Linux on amd64 +# [[inputs.intel_pmu]] +# ## List of filesystem locations of JSON files that contain PMU event definitions. +# event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"] +# +# ## List of core events measurement entities. There can be more than one core_events sections. +# [[inputs.intel_pmu.core_events]] +# ## List of events to be counted. Event names shall match names from event_definitions files. 
+# ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. +# ## If absent, all core events from provided event_definitions are counted skipping unresolvable ones. +# events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"] +# +# ## Limits the counting of events to core numbers specified. +# ## If absent, events are counted on all cores. +# ## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element. +# ## example: cores = ["0,2", "4", "12-16"] +# cores = ["0"] +# +# ## Indicator that plugin shall attempt to run core_events.events as a single perf group. +# ## If absent or set to false, each event is counted individually. Defaults to false. +# ## This limits the number of events that can be measured to a maximum of available hardware counters per core. +# ## Could vary depending on type of event, use of fixed counters. +# # perf_group = false +# +# ## Optionally set a custom tag value that will be added to every measurement within this events group. +# ## Can be applied to any group of events, unrelated to perf_group setting. +# # events_tag = "" +# +# ## List of uncore event measurement entities. There can be more than one uncore_events sections. +# [[inputs.intel_pmu.uncore_events]] +# ## List of events to be counted. Event names shall match names from event_definitions files. +# ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. +# ## If absent, all uncore events from provided event_definitions are counted skipping unresolvable ones. +# events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"] +# +# ## Limits the counting of events to specified sockets. +# ## If absent, events are counted on all sockets. +# ## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element. +# ## example: sockets = ["0-2"] +# sockets = ["0"] +# +# ## Indicator that plugin shall provide an aggregated value for multiple units of same type distributed in an uncore. +# ## If absent or set to false, events for each unit are exposed as separate metric. Defaults to false. +# # aggregate_uncore_units = false +# +# ## Optionally set a custom tag value that will be added to every measurement within this events group. +# # events_tag = "" + + +# # Read Intel RDT metrics +# # This plugin ONLY supports non-Windows +# [[inputs.intel_rdt]] +# ## Optionally set sampling interval to Nx100ms. +# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. +# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. +# # sampling_interval = "10" +# +# ## Optionally specify the path to pqos executable. +# ## If not provided, auto discovery will be performed. +# # pqos_path = "/usr/local/bin/pqos" +# +# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. +# ## If not provided, default value is false. +# # shortened_metrics = false +# +# ## Specify the list of groups of CPU core(s) to be provided as pqos input. +# ## Mandatory if processes aren't set and forbidden if processes are specified. +# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] +# # cores = ["0-3"] +# +# ## Specify the list of processes for which Metrics will be collected. +# ## Mandatory if cores aren't set and forbidden if cores are specified. +# ## e.g. ["qemu", "pmd"] +# # processes = ["process"] +# +# ## Specify if the pqos process should be called with sudo. 
+# ## Mandatory if the telegraf process does not run as root. +# # use_sudo = false + + +# # Subscribe and receive OpenConfig Telemetry data using JTI +# [[inputs.jti_openconfig_telemetry]] +# ## List of device addresses to collect telemetry from +# servers = ["localhost:1883"] +# +# ## Authentication details. Username and password are must if device expects +# ## authentication. Client ID must be unique when connecting from multiple instances +# ## of telegraf to the same device +# username = "user" +# password = "pass" +# client_id = "telegraf" +# +# ## Frequency to get data +# sample_frequency = "1000ms" +# +# ## Sensors to subscribe for +# ## A identifier for each sensor can be provided in path by separating with space +# ## Else sensor path will be used as identifier +# ## When identifier is used, we can provide a list of space separated sensors. +# ## A single subscription will be created with all these sensors and data will +# ## be saved to measurement with this identifier name +# sensors = [ +# "/interfaces/", +# "collection /components/ /lldp", +# ] +# +# ## We allow specifying sensor group level reporting rate. To do this, specify the +# ## reporting rate in Duration at the beginning of sensor paths / collection +# ## name. For entries without reporting rate, we use configured sample frequency +# sensors = [ +# "1000ms customReporting /interfaces /lldp", +# "2000ms collection /components", +# "/interfaces", +# ] +# +# ## Timestamp Source +# ## Set to 'collection' for time of collection, and 'data' for using the time +# ## provided by the _timestamp field. +# # timestamp_source = "collection" +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Minimal TLS version to accept by the client +# # tls_min_version = "TLS12" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. +# ## Failed streams/calls will not be retried if 0 is provided +# retry_delay = "1000ms" +# +# ## Period for sending keep-alive packets on idle connections +# ## This is helpful to identify broken connections to the server +# # keep_alive_period = "10s" +# +# ## To treat all string values as tags, set this to true +# str_as_tags = false + + +# # Read metrics from Kafka topics +# [[inputs.kafka_consumer]] +# ## Kafka brokers. +# brokers = ["localhost:9092"] +# +# ## Topics to consume. +# topics = ["telegraf"] +# +# ## Topic regular expressions to consume. Matches will be added to topics. +# ## Example: topic_regexps = [ "*test", "metric[0-9A-z]*" ] +# # topic_regexps = [ ] +# +# ## When set this tag will be added to all metrics with the topic as the value. +# # topic_tag = "" +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Must be 0.10.2.0 or greater. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Defaults to the OS configuration if not specified or zero. +# # keep_alive_period = "15s" +# +# ## SASL authentication credentials. 
These settings should typically be used +# ## with TLS encryption enabled +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER +# # sasl_access_token = "" +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# # Disable Kafka metadata full fetch +# # metadata_full = false +# +# ## Name of the consumer group. +# # consumer_group = "telegraf_metrics_consumers" +# +# ## Compression codec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD +# # compression_codec = 0 +# ## Initial offset position; one of "oldest" or "newest". +# # offset = "oldest" +# +# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". +# # balance_strategy = "range" +# +# ## Maximum number of retries for metadata operations including +# ## connecting. Sets Sarama library's Metadata.Retry.Max config value. If 0 or +# ## unset, use the Sarama default of 3, +# # metadata_retry_max = 0 +# +# ## Type of retry backoff. Valid options: "constant", "exponential" +# # metadata_retry_type = "constant" +# +# ## Amount of time to wait before retrying. When metadata_retry_type is +# ## "constant", each retry is delayed this amount. When "exponential", the +# ## first retry is delayed this amount, and subsequent delays are doubled. If 0 +# ## or unset, use the Sarama default of 250 ms +# # metadata_retry_backoff = 0 +# +# ## Maximum amount of time to wait before retrying when metadata_retry_type is +# ## "exponential". Ignored for other retry types. If 0, there is no backoff +# ## limit. +# # metadata_retry_max_duration = 0 +# +# ## Strategy for making connection to kafka brokers. Valid options: "startup", +# ## "defer". If set to "defer" the plugin is allowed to start before making a +# ## connection. This is useful if the broker may be down when telegraf is +# ## started, but if there are any typos in the broker setting, they will cause +# ## connection failures without warning at startup +# # connection_strategy = "startup" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 1000000 +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Maximum amount of time the consumer should take to process messages. 
If +# ## the debug log prints messages from sarama about 'abandoning subscription +# ## to [topic] because consuming was taking too long', increase this value to +# ## longer than the time taken by the output plugin(s). +# ## +# ## Note that the effective timeout could be between 'max_processing_time' and +# ## '2 * max_processing_time'. +# # max_processing_time = "100ms" +# +# ## The default number of message bytes to fetch from the broker in each +# ## request (default 1MB). This should be larger than the majority of +# ## your messages, or else the consumer will spend a lot of time +# ## negotiating sizes and not actually consuming. Similar to the JVM's +# ## `fetch.message.max.bytes`. +# # consumer_fetch_default = "1MB" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# ## DEPRECATED: The "kafka_consumer_legacy" plugin is deprecated in version 1.4.0 and will be removed in 1.30.0, use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+. +# # Read metrics from Kafka topic(s) +# [[inputs.kafka_consumer_legacy]] +# ## topic(s) to consume +# topics = ["telegraf"] +# +# ## an array of Zookeeper connection strings +# zookeeper_peers = ["localhost:2181"] +# +# ## Zookeeper Chroot +# zookeeper_chroot = "" +# +# ## the name of the consumer group +# consumer_group = "telegraf_metrics_consumers" +# +# ## Offset (must be either "oldest" or "newest") +# offset = "oldest" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 65536 + + +# # Configuration for the AWS Kinesis input. +# [[inputs.kinesis_consumer]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# +# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) +# # shard_iterator_type = "TRIM_HORIZON" +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. 
This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## +# ## The content encoding of the data from kinesis +# ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip" +# ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws +# ## also base64 encodes the zip byte data before pushing to the stream. The base64 decoding +# ## is done automatically by the golang sdk, as data is read from kinesis) +# ## +# # content_encoding = "identity" +# +# ## Optional +# ## Configuration for a dynamodb checkpoint +# [inputs.kinesis_consumer.checkpoint_dynamodb] +# ## unique name for this consumer +# app_name = "default" +# table_name = "default" + + +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.knx_listener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel_udp", "tunnel_tcp", "tunnel" (alias for tunnel_udp) or "router". +# # service_type = "tunnel" +# +# ## Address of the KNX-IP interface. +# service_address = "localhost:3671" +# +# ## Measurement definition(s) +# # [[inputs.knx_listener.measurement]] +# # ## Name of the measurement +# # name = "temperature" +# # ## Datapoint-Type (DPT) of the KNX messages +# # dpt = "9.001" +# # ## List of Group-Addresses (GAs) assigned to the measurement +# # addresses = ["5/5/1"] +# +# # [[inputs.knx_listener.measurement]] +# # name = "illumination" +# # dpt = "9.004" +# # addresses = ["5/5/3"] + + +# # Read metrics off Arista LANZ, via socket +# [[inputs.lanz]] +# ## URL to Arista LANZ endpoint +# servers = [ +# "tcp://switch1.int.example.com:50001", +# "tcp://switch2.int.example.com:50001", +# ] + + +# ## DEPRECATED: The "logparser" plugin is deprecated in version 1.15.0, use 'inputs.tail' with 'grok' data format instead. +# # Read metrics off Arista LANZ, via socket +# [[inputs.logparser]] +# ## Log files to parse. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/**.log -> recursively find all .log files in /var/log +# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log +# ## /var/log/apache.log -> only tail the apache log file +# files = ["/var/log/apache/access.log"] +# +# ## Read files that currently exist from the beginning. Files that are created +# ## while telegraf is running (and that match the "files" globs) will always +# ## be read from the beginning. +# from_beginning = false +# +# ## Method used to watch for file updates. Can be either "inotify" or "poll". +# # watch_method = "inotify" +# +# ## Parse logstash-style "grok" patterns: +# [inputs.logparser.grok] +# ## This is a list of patterns to check the given log file(s) for. +# ## Note that adding patterns here increases processing time. The most +# ## efficient configuration is to have one pattern per logparser. 
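+# ## A pattern is referenced by name inside %{...}; custom pattern names can
+# ## also be defined further below under "custom_patterns".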
+# ## Other common built-in patterns are: +# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) +# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) +# patterns = ["%{COMBINED_LOG_FORMAT}"] +# +# ## Name of the outputted measurement name. +# measurement = "apache_access_log" +# +# ## Full path(s) to custom pattern files. +# custom_pattern_files = [] +# +# ## Custom patterns can also be defined here. Put one pattern per line. +# custom_patterns = ''' +# ''' +# +# ## Timezone allows you to provide an override for timestamps that +# ## don't already include an offset +# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs +# ## +# ## Default: "" which renders UTC +# ## Options are as follows: +# ## 1. Local -- interpret based on machine localtime +# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones +# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC +# # timezone = "Canada/Eastern" +# +# ## When set to "disable", timestamp will not incremented if there is a +# ## duplicate. +# # unique_timestamp = "auto" + + +# # Read metrics from one or many MongoDB servers +# [[inputs.mongodb]] +# ## An array of URLs of the form: +# ## "mongodb://" [user ":" pass "@"] host [ ":" port] +# ## For example: +# ## mongodb://user:auth_key@10.10.3.30:27017, +# ## mongodb://10.10.3.33:18832, +# ## +# ## If connecting to a cluster, users must include the "?connect=direct" in +# ## the URL to ensure that the connection goes directly to the specified node +# ## and not have all connections passed to the master node. +# servers = ["mongodb://127.0.0.1:27017/?connect=direct"] +# +# ## When true, collect cluster status. +# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which +# ## may have an impact on performance. +# # gather_cluster_status = true +# +# ## When true, collect per database stats +# # gather_perdb_stats = false +# +# ## When true, collect per collection stats +# # gather_col_stats = false +# +# ## When true, collect usage statistics for each collection +# ## (insert, update, queries, remove, getmore, commands etc...). +# # gather_top_stat = false +# +# ## List of db where collections stats are collected +# ## If empty, all db are concerned +# # col_stats_dbs = ["local"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Specifies plugin behavior regarding disconnected servers +# ## Available choices : +# ## - error: telegraf will return an error on startup if one the servers is unreachable +# ## - skip: telegraf will skip unreachable servers on both startup and gather +# # disconnected_servers_behavior = "error" + + +# # Read metrics from MQTT topic(s) +# [[inputs.mqtt_consumer]] +# ## Broker URLs for the MQTT server or cluster. To connect to multiple +# ## clusters or standalone servers, use a separate plugin instance. +# ## example: servers = ["tcp://localhost:1883"] +# ## servers = ["ssl://localhost:1883"] +# ## servers = ["ws://localhost:1883"] +# servers = ["tcp://127.0.0.1:1883"] +# +# ## Topics that will be subscribed to. +# topics = [ +# "telegraf/host01/cpu", +# "telegraf/+/mem", +# "sensors/#", +# ] +# +# ## The message topic will be stored in a tag specified by this value. If set +# ## to the empty string no topic tag will be created. 
+# # topic_tag = "topic" +# +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# ## +# ## When using a QoS of 1 or 2, you should enable persistent_session to allow +# ## resuming unacknowledged messages. +# # qos = 0 +# +# ## Connection timeout for initial connection in seconds +# # connection_timeout = "30s" +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Persistent session disables clearing of the client session on connection. +# ## In order for this option to work you must also set client_id to identify +# ## the client. To receive messages that arrived while the client is offline, +# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when +# ## publishing. +# # persistent_session = false +# +# ## If unset, a random client ID will be generated. +# # client_id = "" +# +# ## Username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Client trace messages +# ## When set to true, and debug mode enabled in the agent settings, the MQTT +# ## client's messages are included in telegraf logs. These messages are very +# ## noisey, but essential for debugging issues. +# # client_trace = false +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Enable extracting tag values from MQTT topics +# ## _ denotes an ignored entry in the topic path +# # [[inputs.mqtt_consumer.topic_parsing]] +# # topic = "" +# # measurement = "" +# # tags = "" +# # fields = "" +# ## Value supported is int, float, unit +# # [[inputs.mqtt_consumer.topic.types]] +# # key = type + + +# # Read metrics from NATS subject(s) +# [[inputs.nats_consumer]] +# ## urls of NATS servers +# servers = ["nats://localhost:4222"] +# +# ## subject(s) to consume +# ## If you use jetstream you need to set the subjects +# ## in jetstream_subjects +# subjects = ["telegraf"] +# +# ## jetstream subjects +# ## jetstream is a streaming technology inside of nats. +# ## With jetstream the nats-server persists messages and +# ## a consumer can consume historical messages. This is +# ## useful when telegraf needs to restart it don't miss a +# ## message. You need to configure the nats-server. +# ## https://docs.nats.io/nats-concepts/jetstream. 
+# jetstream_subjects = ["js_telegraf"] +# +# ## name a queue group +# queue_group = "telegraf_consumers" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Sets the limits for pending msgs and bytes for each subscription +# ## These shouldn't need to be adjusted except in very high throughput scenarios +# # pending_message_limit = 65536 +# # pending_bytes_limit = 67108864 +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Netflow v5, Netflow v9 and IPFIX collector +# [[inputs.netflow]] +# ## Address to listen for netflow,ipfix or sflow packets. +# ## example: service_address = "udp://:2055" +# ## service_address = "udp4://:2055" +# ## service_address = "udp6://:2055" +# service_address = "udp://:2055" +# +# ## Set the size of the operating system's receive buffer. +# ## example: read_buffer_size = "64KiB" +# ## Uses the system's default if not set. +# # read_buffer_size = "" +# +# ## Protocol version to use for decoding. +# ## Available options are +# ## "ipfix" -- IPFIX / Netflow v10 protocol (also works for Netflow v9) +# ## "netflow v5" -- Netflow v5 protocol +# ## "netflow v9" -- Netflow v9 protocol (also works for IPFIX) +# ## "sflow v5" -- sFlow v5 protocol +# # protocol = "ipfix" +# +# ## Private Enterprise Numbers (PEN) mappings for decoding +# ## This option allows to specify vendor-specific mapping files to use during +# ## decoding. +# # private_enterprise_number_files = [] +# +# ## Dump incoming packets to the log +# ## This can be helpful to debug parsing issues. Only active if +# ## Telegraf is in debug mode. +# # dump_packets = false + + +# # Read metrics from NSQD topic(s) +# [[inputs.nsq_consumer]] +# ## Server option still works but is deprecated, we just prepend it to the nsqd array. +# # server = "localhost:4150" +# +# ## An array representing the NSQD TCP HTTP Endpoints +# nsqd = ["localhost:4150"] +# +# ## An array representing the NSQLookupd HTTP Endpoints +# nsqlookupd = ["localhost:4161"] +# topic = "telegraf" +# channel = "consumer" +# max_in_flight = 100 +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. 
This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Retrieve data from OPCUA devices +# [[inputs.opcua_listener]] +# ## Metric name +# # name = "opcua_listener" +# # +# ## OPC UA Endpoint URL +# # endpoint = "opc.tcp://localhost:4840" +# # +# ## Maximum time allowed to establish a connect to the endpoint. +# # connect_timeout = "10s" +# # +# ## Maximum time allowed for a request over the established connection. +# # request_timeout = "5s" +# # +# ## The interval at which the server should at least update its monitored items +# # subscription_interval = "100ms" +# # +# ## Security policy, one of "None", "Basic128Rsa15", "Basic256", +# ## "Basic256Sha256", or "auto" +# # security_policy = "auto" +# # +# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto" +# # security_mode = "auto" +# # +# ## Path to cert.pem. Required when security mode or policy isn't "None". +# ## If cert path is not supplied, self-signed cert and key will be generated. +# # certificate = "/etc/telegraf/cert.pem" +# # +# ## Path to private key.pem. Required when security mode or policy isn't "None". +# ## If key path is not supplied, self-signed cert and key will be generated. +# # private_key = "/etc/telegraf/key.pem" +# # +# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To +# ## authenticate using a specific ID, select 'Certificate' or 'UserName' +# # auth_method = "Anonymous" +# # +# ## Username. Required for auth_method = "UserName" +# # username = "" +# # +# ## Password. Required for auth_method = "UserName" +# # password = "" +# # +# ## Option to select the metric timestamp to use. Valid options are: +# ## "gather" -- uses the time of receiving the data in telegraf +# ## "server" -- uses the timestamp provided by the server +# ## "source" -- uses the timestamp provided by the source +# # timestamp = "gather" +# # +# ## The default timetsamp format is RFC3339Nano +# # Other timestamp layouts can be configured using the Go language time +# # layout specification from https://golang.org/pkg/time/#Time.Format +# # e.g.: json_timestamp_format = "2006-01-02T15:04:05Z07:00" +# #timestamp_format = "" +# # +# ## Node ID configuration +# ## name - field name to use in the output +# ## namespace - OPC UA namespace of the node (integer value 0 thru 3) +# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) +# ## identifier - OPC UA ID (tag as shown in opcua browser) +# ## default_tags - extra tags to be added to the output metric (optional) +# ## +# ## Use either the inline notation or the bracketed notation, not both. 
+# # +# ## Inline notation (default_tags not supported yet) +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier=""}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# # ] +# # +# ## Bracketed notation +# # [[inputs.opcua_listener.nodes]] +# # name = "node1" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# # default_tags = { tag1 = "value1", tag2 = "value2" } +# # +# # [[inputs.opcua_listener.nodes]] +# # name = "node2" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# # +# ## Node Group +# ## Sets defaults so they aren't required in every node. +# ## Default values can be set for: +# ## * Metric name +# ## * OPC UA namespace +# ## * Identifier +# ## * Default tags +# ## +# ## Multiple node groups are allowed +# #[[inputs.opcua_listener.group]] +# ## Group Metric name. Overrides the top level name. If unset, the +# ## top level name is used. +# # name = +# # +# ## Group default namespace. If a node in the group doesn't set its +# ## namespace, this is used. +# # namespace = +# # +# ## Group default identifier type. If a node in the group doesn't set its +# ## namespace, this is used. +# # identifier_type = +# # +# ## Default tags that are applied to every node in this group. Can be +# ## overwritten in a node by setting a different value for the tag name. +# ## example: default_tags = { tag1 = "value1" } +# # default_tags = {} +# # +# ## Node ID Configuration. Array of nodes with the same settings as above. +# ## Use either the inline notation or the bracketed notation, not both. +# # +# ## Inline notation (default_tags not supported yet) +# # nodes = [ +# # {name="node1", namespace="", identifier_type="", identifier=""}, +# # {name="node2", namespace="", identifier_type="", identifier=""}, +# #] +# # +# ## Bracketed notation +# # [[inputs.opcua_listener.group.nodes]] +# # name = "node1" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# # default_tags = { tag1 = "override1", tag2 = "value2" } +# # +# # [[inputs.opcua_listener.group.nodes]] +# # name = "node2" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# +# ## Enable workarounds required by some devices to work correctly +# # [inputs.opcua_listener.workarounds] +# ## Set additional valid status codes, StatusOK (0x0) is always considered valid +# # additional_valid_status_codes = ["0xC0"] +# +# # [inputs.opcua_listener.request_workarounds] +# ## Use unregistered reads instead of registered reads +# # use_unregistered_reads = false + + +# # Collects performance metrics from OpenStack services +# [[inputs.openstack]] +# ## The recommended interval to poll is '30m' +# +# ## The identity endpoint to authenticate against and get the service catalog from. +# authentication_endpoint = "https://my.openstack.cloud:5000" +# +# ## The domain to authenticate against when using a V3 identity endpoint. +# # domain = "default" +# +# ## The project to authenticate as. +# # project = "admin" +# +# ## User authentication credentials. Must have admin rights. 
+# username = "admin" +# password = "password" +# +# ## Available services are: +# ## "agents", "aggregates", "cinder_services", "flavors", "hypervisors", "networks", +# ## "nova_services", "ports", "projects", "servers", "services", "stacks", "storage_pools", +# ## "subnets", "volumes" +# # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] +# +# ## Collect Server Diagnostics +# # server_diagnotics = false +# +# ## output secrets (such as adminPass(for server) and UserID(for volume)). +# # output_secrets = false +# +# ## Amount of time allowed to complete the HTTP(s) request. +# # timeout = "5s" +# +# ## HTTP Proxy support +# # http_proxy_url = "" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Options for tags received from Openstack +# # tag_prefix = "openstack_tag_" +# # tag_value = "true" +# +# ## Timestamp format for timestamp data received from Openstack. +# ## If false format is unix nanoseconds. +# # human_readable_timestamps = false +# +# ## Measure Openstack call duration +# # measure_openstack_requests = false + + +# # Receive OpenTelemetry traces, metrics, and logs over gRPC +# [[inputs.opentelemetry]] +# ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service +# ## address:port +# # service_address = "0.0.0.0:4317" +# +# ## Override the default (5s) new connection timeout +# # timeout = "5s" +# +# ## Override the default span attributes to be used as line protocol tags. +# ## These are always included as tags: +# ## - trace ID +# ## - span ID +# ## The default values are strongly recommended for use with Jaeger: +# ## - service.name +# ## - span.name +# ## Other common attributes can be found here: +# ## - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv +# # span_dimensions = ["service.name", "span.name"] +# +# ## Override the default log record attributes to be used as line protocol tags. +# ## These are always included as tags, if available: +# ## - trace ID +# ## - span ID +# ## The default values: +# ## - service.name +# ## Other common attributes can be found here: +# ## - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv +# ## When using InfluxDB for both logs and traces, be certain that log_record_dimensions +# ## matches the span_dimensions value. +# # log_record_dimensions = ["service.name"] +# +# ## Override the default (prometheus-v1) metrics schema. +# ## Supports: "prometheus-v1", "prometheus-v2" +# ## For more information about the alternatives, read the Prometheus input +# ## plugin notes. +# # metrics_schema = "prometheus-v1" +# +# ## Optional TLS Config. +# ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md +# ## +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# ## Add service certificate and key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Read metrics from one or many pgbouncer servers +# [[inputs.pgbouncer]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@host:port[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost port=5432 user=pqgotest password=... sslmode=... 
dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# address = "host=localhost user=pgbouncer sslmode=disable" +# +# ## Specify which "show" commands to gather metrics for. +# ## Choose from: "stats", "pools", "lists", "databases" +# # show_commands = ["stats", "pools"] + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql]] +# ## Specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]&statement_timeout=... +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# ## Users can pass the path to the socket as the host value to use a socket +# ## connection (e.g. `/var/run/postgresql`). +# ## +# ## All connection parameters are optional. +# ## +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# ## +# address = "host=localhost user=postgres sslmode=disable" +# +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # outputaddress = "db01" +# +# ## connection configuration. +# ## maxlifetime - specify the maximum lifetime of a connection. +# ## default is forever (0s) +# ## +# ## Note that this does not interrupt queries, the lifetime will not be enforced +# ## whilst a query is running +# # max_lifetime = "0s" +# +# ## A list of databases to explicitly ignore. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'databases' option. +# # ignored_databases = ["postgres", "template0", "template1"] +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'ignored_databases' option. +# # databases = ["app_production", "testing"] +# +# ## Whether to use prepared statements when connecting to the database. +# ## This should be set to false when connecting through a PgBouncer instance +# ## with pool_mode set to transaction. +# prepared_statements = true + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql_extensible]] +# # specify address via a url matching: +# # postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=...&statement_timeout=... +# # or a simple string: +# # host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production +# # +# # All connection parameters are optional. +# # Without the dbname parameter, the driver will default to a database +# # with the same name as the user. This dbname is just for instantiating a +# # connection with the server and doesn't restrict the databases we are trying +# # to grab metrics for. +# # +# address = "host=localhost user=postgres sslmode=disable" +# +# ## A list of databases to pull metrics about. +# ## deprecated in 1.22.3; use the sqlquery option to specify database to use +# # databases = ["app_production", "testing"] +# +# ## Whether to use prepared statements when connecting to the database. +# ## This should be set to false when connecting through a PgBouncer instance +# ## with pool_mode set to transaction. 
+# prepared_statements = true +# +# # Define the toml config where the sql queries are stored +# # The script option can be used to specify the .sql file path. +# # If script and sqlquery options specified at same time, sqlquery will be used +# # +# # the measurement field defines measurement name for metrics produced +# # by the query. Default is "postgresql". +# # +# # the tagvalue field is used to define custom tags (separated by comas). +# # the query is expected to return columns which match the names of the +# # defined tags. The values in these columns must be of a string-type, +# # a number-type or a blob-type. +# # +# # The timestamp field is used to override the data points timestamp value. By +# # default, all rows inserted with current time. By setting a timestamp column, +# # the row will be inserted with that column's value. +# # +# # The min_version field specifies minimal database version this query +# # will run on. +# # +# # The max_version field when set specifies maximal database version +# # this query will NOT run on. +# # +# # Database version in `minversion` and `maxversion` is represented as +# # a single integer without last component, for example: +# # 9.6.2 -> 906 +# # 15.2 -> 1500 +# # +# # Structure : +# # [[inputs.postgresql_extensible.query]] +# # measurement string +# # sqlquery string +# # min_version int +# # max_version int +# # withdbname boolean +# # tagvalue string (coma separated) +# # timestamp string +# [[inputs.postgresql_extensible.query]] +# measurement="pg_stat_database" +# sqlquery="SELECT * FROM pg_stat_database where datname" +# min_version=901 +# tagvalue="" +# [[inputs.postgresql_extensible.query]] +# script="your_sql-filepath.sql" +# min_version=901 +# max_version=1300 +# tagvalue="" + + +# # Read metrics from one or many prometheus clients +# [[inputs.prometheus]] +# ## An array of urls to scrape metrics from. +# urls = ["http://localhost:9100/metrics"] +# +# ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics. +# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details. +# ## Valid options: 1, 2 +# # metric_version = 1 +# +# ## Url tag name (tag containing scrapped url. optional, default is "url") +# # url_tag = "url" +# +# ## Whether the timestamp of the scraped metrics will be ignored. +# ## If set to true, the gather time will be used. +# # ignore_timestamp = false +# +# ## An array of Kubernetes services to scrape metrics from. +# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] +# +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" +# +# ## Scrape Pods +# ## Enable scraping of k8s pods. Further settings as to which pods to scape +# ## are determiend by the 'method' option below. When enabled, the default is +# ## to use annotations to determine whether to scrape or not. +# # monitor_kubernetes_pods = false +# +# ## Scrape Pods Method +# ## annotations: default, looks for specific pod annotations documented below +# ## settings: only look for pods matching the settings provided, not +# ## annotations +# ## settings+annotations: looks at pods that match annotations using the user +# ## defined settings +# # monitor_kubernetes_pods_method = "annotations" +# +# ## Scrape Pods 'annotations' method options +# ## If set method is set to 'annotations' or 'settings+annotations', these +# ## annotation flags are looked for: +# ## - prometheus.io/scrape: Required to enable scraping for this pod. 
Can also +# ## use 'prometheus.io/scrape=false' annotation to opt-out entirely. +# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will +# ## need to set this to 'https' & most likely set the tls config +# ## - prometheus.io/path: If the metrics path is not /metrics, define it with +# ## this annotation +# ## - prometheus.io/port: If port is not 9102 use this annotation +# +# ## Scrape Pods 'settings' method options +# ## When using 'settings' or 'settings+annotations', the default values for +# ## annotations can be modified using with the following options: +# # monitor_kubernetes_pods_scheme = "http" +# # monitor_kubernetes_pods_port = "9102" +# # monitor_kubernetes_pods_path = "/metrics" +# +# ## Get the list of pods to scrape with either the scope of +# ## - cluster: the kubernetes watch api (default, no need to specify) +# ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. +# # pod_scrape_scope = "cluster" +# +# ## Only for node scrape scope: node IP of the node that telegraf is running on. +# ## Either this config or the environment variable NODE_IP must be set. +# # node_ip = "10.180.1.1" +# +# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. +# ## Default is 60 seconds. +# # pod_scrape_interval = 60 +# +# ## Restricts Kubernetes monitoring to a single namespace +# ## ex: monitor_kubernetes_pods_namespace = "default" +# # monitor_kubernetes_pods_namespace = "" +# ## The name of the label for the pod that is being scraped. +# ## Default is 'namespace' but this can conflict with metrics that have the label 'namespace' +# # pod_namespace_label_name = "namespace" +# # label selector to target pods which have the label +# # kubernetes_label_selector = "env=dev,app=nginx" +# # field selector to target pods +# # eg. To scrape pods on a specific node +# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" +# +# ## Filter which pod annotations and labels will be added to metric tags +# # +# # pod_annotation_include = ["annotation-key-1"] +# # pod_annotation_exclude = ["exclude-me"] +# # pod_label_include = ["label-key-1"] +# # pod_label_exclude = ["exclude-me"] +# +# # cache refresh interval to set the interval for re-sync of pods list. +# # Default is 60 minutes. +# # cache_refresh_interval = 60 +# +# ## Scrape Services available in Consul Catalog +# # [inputs.prometheus.consul] +# # enabled = true +# # agent = "http://localhost:8500" +# # query_interval = "5m" +# +# # [[inputs.prometheus.consul.query]] +# # name = "a service name" +# # tag = "a service tag" +# # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}' +# # [inputs.prometheus.consul.query.tags] +# # host = "{{.Node}}" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## HTTP Basic Authentication username and password. 
('bearer_token' and +# ## 'bearer_token_string' take priority) +# # username = "" +# # password = "" +# +# ## Optional custom HTTP headers +# # http_headers = {"X-Special-Header" = "Special-Value"} +# +# ## Specify timeout duration for slower prometheus clients (default is 5s) +# # timeout = "5s" +# +# ## deprecated in 1.26; use the timeout option +# # response_timeout = "5s" +# +# ## HTTP Proxy support +# # use_system_proxy = false +# # http_proxy_url = "" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Use the given name as the SNI server name on each URL +# # tls_server_name = "myhost.example.org" +# +# ## TLS renegotiation method, choose from "never", "once", "freely" +# # tls_renegotiation_method = "never" +# +# ## Enable/disable TLS +# ## Set to true/false to enforce TLS being enabled/disabled. If not set, +# ## enable TLS only if any of the other options are specified. +# # tls_enable = true +# +# ## Control pod scraping based on pod namespace annotations +# ## Pass and drop here act like tagpass and tagdrop, but instead +# ## of filtering metrics they filters pod candidates for scraping +# #[inputs.prometheus.namespace_annotation_pass] +# # annotation_key = ["value1", "value2"] +# #[inputs.prometheus.namespace_annotation_drop] +# # some_annotation_key = ["dont-scrape"] + + +# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required). +# # This plugin ONLY supports Linux on 386, amd64, arm, and arm64 +# [[inputs.ras]] +# ## Optional path to RASDaemon sqlite3 database. +# ## Default: /var/lib/rasdaemon/ras-mc_event.db +# # db_path = "" + + +# # Read metrics from one or many redis servers +# [[inputs.redis]] +# ## specify servers via a url matching: +# ## [protocol://][username:password]@address[:port] +# ## e.g. +# ## tcp://localhost:6379 +# ## tcp://username:password@192.168.99.100 +# ## unix:///var/run/redis.sock +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 6379 is used +# servers = ["tcp://localhost:6379"] +# +# ## Optional. Specify redis commands to retrieve values +# # [[inputs.redis.commands]] +# # # The command to run where each argument is a separate element +# # command = ["get", "sample-key"] +# # # The field to store the result in +# # field = "sample-key-value" +# # # The type of the result +# # # Can be "string", "integer", or "float" +# # type = "string" +# +# ## Specify username and password for ACL auth (Redis 6.0+). You can add this +# ## to the server URI above or specify it here. The values here take +# ## precidence. +# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Riemann protobuff listener +# [[inputs.riemann_listener]] +# ## URL to listen on +# ## Default is "tcp://:5555" +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# +# ## Maximum number of concurrent connections. +# ## 0 (default) is unlimited. +# # max_connections = 1024 +# ## Read timeout. +# ## 0 (default) is unlimited. 
+# # read_timeout = "30s" +# ## Optional TLS configuration. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Enables client authentication if set. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# ## Maximum socket buffer size (in bytes when no unit specified). +# # read_buffer_size = "64KiB" +# ## Period between keep alive probes. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" + + +# # Plugin for retrieving data from Siemens PLCs via the S7 protocol (RFC1006) +# [[inputs.s7comm]] +# ## Parameters to contact the PLC (mandatory) +# ## The server is in the <host>[:port] format where the port defaults to 102 +# ## if not explicitly specified. +# server = "127.0.0.1:102" +# rack = 0 +# slot = 0 +# +# ## Timeout for requests +# # timeout = "10s" +# +# ## Log detailed connection messages for debugging +# ## This option only has an effect when Telegraf runs in debug mode +# # debug_connection = false +# +# ## Metric definition(s) +# [[inputs.s7comm.metric]] +# ## Name of the measurement +# # name = "s7comm" +# +# ## Field definitions +# ## name - field name +# ## address - indirect address "<area>.<type><address>
[.extra]" +# ## area - e.g. be "DB1" for data-block one +# ## type - supported types are (uppercase) +# ## X -- bit, requires the bit-number as 'extra' +# ## parameter +# ## B -- byte (8 bit) +# ## C -- character (8 bit) +# ## W -- word (16 bit) +# ## DW -- double word (32 bit) +# ## I -- integer (16 bit) +# ## DI -- double integer (32 bit) +# ## R -- IEEE 754 real floating point number (32 bit) +# ## DT -- date-time, always converted to unix timestamp +# ## with nano-second precision +# ## S -- string, requires the maximum length of the +# ## string as 'extra' parameter +# ## address - start address to read if not specified otherwise +# ## in the type field +# ## extra - extra parameter e.g. for the bit and string type +# fields = [ +# { name="rpm", address="DB1.R4" }, +# { name="status_ok", address="DB1.X2.1" }, +# { name="last_error", address="DB2.S1.32" }, +# { name="last_error_time", address="DB2.DT2" } +# ] +# +# ## Tags assigned to the metric +# # [inputs.s7comm.metric.tags] +# # device = "compressor" +# # location = "main building" + + +# # SFlow V5 Protocol Listener +# [[inputs.sflow]] +# ## Address to listen for sFlow packets. +# ## example: service_address = "udp://:6343" +# ## service_address = "udp4://:6343" +# ## service_address = "udp6://:6343" +# service_address = "udp://:6343" +# +# ## Set the size of the operating system's receive buffer. +# ## example: read_buffer_size = "64KiB" +# # read_buffer_size = "" + + +# # Receive SNMP traps +# [[inputs.snmp_trap]] +# ## Transport, local address, and port to listen on. Transport must +# ## be "udp://". Omit local address to listen on all interfaces. +# ## example: "udp://127.0.0.1:1234" +# ## +# ## Special permissions may be required to listen on a port less than +# ## 1024. See README.md for details +# ## +# # service_address = "udp://:162" +# ## +# ## Path to mib files +# ## Used by the gosmi translator. +# ## To add paths when translating with netsnmp, use the MIBDIRS environment variable +# # path = ["/usr/share/snmp/mibs"] +# ## +# ## Deprecated in 1.20.0; no longer running snmptranslate +# ## Timeout running snmptranslate command +# # timeout = "5s" +# ## Snmp version +# # version = "2c" +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA" or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" + + +# # Generic socket listener capable of handling multiple socket types. +# [[inputs.socket_listener]] +# ## URL to listen on +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# # service_address = "udp://:8094" +# # service_address = "udp4://:8094" +# # service_address = "udp6://:8094" +# # service_address = "unix:///tmp/telegraf.sock" +# # service_address = "unixgram:///tmp/telegraf.sock" +# +# ## Change the file mode bits on unix sockets. 
These permissions may not be +# ## respected by some platforms, to safely restrict write permissions it is best +# ## to place the socket into a directory that has previously been created +# ## with the desired permissions. +# ## ex: socket_mode = "777" +# # socket_mode = "" +# +# ## Maximum number of concurrent connections. +# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. +# # max_connections = 1024 +# +# ## Read timeout. +# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. +# # read_timeout = "30s" +# +# ## Optional TLS configuration. +# ## Only applies to stream sockets (e.g. TCP). +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Enables client authentication if set. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Maximum socket buffer size (in bytes when no unit specified). +# ## For stream sockets, once the buffer fills up, the sender will start backing up. +# ## For datagram sockets, once the buffer fills up, metrics will start dropping. +# ## Defaults to the OS default. +# # read_buffer_size = "64KiB" +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Maximum size of decoded packet. +# ## Acceptable units are B, KiB, KB, MiB, MB... +# ## Without quotes and units, interpreted as size in bytes. +# # max_decompression_size = "500MB" +# +# ## Message splitting strategy and corresponding settings for stream sockets +# ## (tcp, tcp4, tcp6, unix or unixpacket). The setting is ignored for packet +# ## listeners such as udp. +# ## Available strategies are: +# ## newline -- split at newlines (default) +# ## null -- split at null bytes +# ## delimiter -- split at delimiter byte-sequence in hex-format +# ## given in `splitting_delimiter` +# ## fixed length -- split after number of bytes given in `splitting_length` +# ## variable length -- split depending on length information received in the +# ## data. The length field information is specified in +# ## `splitting_length_field`. +# # splitting_strategy = "newline" +# +# ## Delimiter used to split received data to messages consumed by the parser. +# ## The delimiter is a hex byte-sequence marking the end of a message +# ## e.g. "0x0D0A", "x0d0a" or "0d0a" marks a Windows line-break (CR LF). +# ## The value is case-insensitive and can be specifed with "0x" or "x" prefix +# ## or withou. +# ## Note: This setting is only used for splitting_strategy = "delimiter". +# # splitting_delimiter = "" +# +# ## Fixed length of a message in bytes. +# ## Note: This setting is only used for splitting_strategy = "fixed length". +# # splitting_length = 0 +# +# ## Specification of the length field contained in the data to split messages +# ## with variable length. 
The specification contains the following fields: +# ## offset -- start of length field in bytes from begin of data +# ## bytes -- length of length field in bytes +# ## endianness -- endianness of the value, either "be" for big endian or +# ## "le" for little endian +# ## header_length -- total length of header to be skipped when passing +# ## data on to the parser. If zero (default), the header +# ## is passed on to the parser together with the message. +# ## Note: This setting is only used for splitting_strategy = "variable length". +# # splitting_length_field = {offset = 0, bytes = 0, endianness = "be", header_length = 0} + + +# # Read stats from one or more Solr servers or cores +# [[inputs.solr]] +# ## specify a list of one or more Solr servers +# servers = ["http://localhost:8983"] +# +# ## specify a list of one or more Solr cores (default - all) +# # cores = ["*"] +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Timeout for HTTP requests +# # timeout = "5s" + + +# # Read metrics from SQL queries +# [[inputs.sql]] +# ## Database Driver +# ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for +# ## a list of supported drivers. +# driver = "mysql" +# +# ## Data source name for connecting +# ## The syntax and supported options depends on selected driver. +# dsn = "username:password@mysqlserver:3307/dbname?param=value" +# +# ## Timeout for any operation +# ## Note that the timeout for queries is per query not per gather. +# # timeout = "5s" +# +# ## Connection time limits +# ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections +# ## will not be closed automatically. If you specify a positive time, the connections will be closed after +# ## idleing or existing for at least that amount of time, respectively. +# # connection_max_idle_time = "0s" +# # connection_max_life_time = "0s" +# +# ## Connection count limits +# ## By default the number of open connections is not limited and the number of maximum idle connections +# ## will be inferred from the number of queries specified. If you specify a positive number for any of the +# ## two options, connections will be closed when reaching the specified limit. The number of idle connections +# ## will be clipped to the maximum number of connections limit if any. +# # connection_max_open = 0 +# # connection_max_idle = auto +# +# ## Specifies plugin behavior regarding disconnected servers +# ## Available choices : +# ## - error: telegraf will return an error on startup if one the servers is unreachable +# ## - ignore: telegraf will ignore unreachable servers on both startup and gather +# # disconnected_servers_behavior = "error" +# +# [[inputs.sql.query]] +# ## Query to perform on the server +# query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0" +# ## Alternatively to specifying the query directly you can select a file here containing the SQL query. +# ## Only one of 'query' and 'query_script' can be specified! +# # query_script = "/path/to/sql/script.sql" +# +# ## Name of the measurement +# ## In case both measurement and 'measurement_col' are given, the latter takes precedence. +# # measurement = "sql" +# +# ## Column name containing the name of the measurement +# ## If given, this will take precedence over the 'measurement' setting. In case a query result +# ## does not contain the specified column, we fall-back to the 'measurement' setting. 
+# # measurement_column = "" +# +# ## Column name containing the time of the measurement +# ## If omitted, the time of the query will be used. +# # time_column = "" +# +# ## Format of the time contained in 'time_column' +# ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. +# ## See https://golang.org/pkg/time/#Time.Format for details. +# # time_format = "unix" +# +# ## Column names containing tags +# ## An empty include list will reject all columns and an empty exclude list will not exclude any column. +# ## I.e. by default no columns will be returned as tag and the tags are empty. +# # tag_columns_include = [] +# # tag_columns_exclude = [] +# +# ## Column names containing fields (explicit types) +# ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over +# ## the automatic (driver-based) conversion below. +# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. +# # field_columns_float = [] +# # field_columns_int = [] +# # field_columns_uint = [] +# # field_columns_bool = [] +# # field_columns_string = [] +# +# ## Column names containing fields (automatic types) +# ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty +# ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. +# ## NOTE: We rely on the database driver to perform automatic datatype conversion. +# # field_columns_include = [] +# # field_columns_exclude = [] + + +# # Read metrics from Microsoft SQL Server +# [[inputs.sqlserver]] +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. +# ## for Windows, the user is the currently running AD user (SSO). +# ## See https://github.com/microsoft/go-mssqldb for detailed connection +# ## parameters, in particular, tls connections can be created like so: +# ## "encrypt=true;certificate=<cert>;hostNameInCertificate=<SqlServer host fqdn>" +# servers = [ +# "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;", +# ] +# +# ## Timeout for query execution operation +# ## Note that the timeout for queries is per query not per gather. +# ## 0 value means no timeout +# # query_timeout = "0s" +# +# ## Authentication method +# ## valid methods: "connection_string", "AAD" +# # auth_method = "connection_string" +# +# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 +# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. +# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" +# database_type = "SQLServer" +# +# ## A list of queries to include. If not specified, all the below listed queries are used. +# include_query = [] +# +# ## A list of queries to explicitly ignore.
+# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] +# +# ## Queries enabled by default for database_type = "SQLServer" are - +# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, +# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates, +# ## SQLServerRecentBackups +# +# ## Queries enabled by default for database_type = "AzureSQLDB" are - +# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers +# +# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers +# +# ## Queries enabled by default for database_type = "AzureSQLPool" are - +# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats, +# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers +# +# ## Queries enabled by default for database_type = "AzureArcSQLManagedInstance" are - +# ## AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers +# +# ## Following are old config settings +# ## You may use them only if you are using the earlier flavor of queries, however it is recommended to use +# ## the new mechanism of identifying the database_type there by use it's corresponding queries +# +# ## Optional parameter, setting this to 2 will use a new version +# ## of the collection queries that break compatibility with the original +# ## dashboards. +# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB +# # query_version = 2 +# +# ## If you are using AzureDB, setting this to true will gather resource utilization metrics +# # azuredb = false +# +# ## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health". +# ## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers". +# ## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues. +# ## This setting/metric is optional and is disabled by default. 
+# # health_metric = false +# +# ## Possible queries accross different versions of the collectors +# ## Queries enabled by default for specific Database Type +# +# ## database_type = AzureSQLDB by default collects the following queries +# ## - AzureSQLDBWaitStats +# ## - AzureSQLDBResourceStats +# ## - AzureSQLDBResourceGovernance +# ## - AzureSQLDBDatabaseIO +# ## - AzureSQLDBServerProperties +# ## - AzureSQLDBOsWaitstats +# ## - AzureSQLDBMemoryClerks +# ## - AzureSQLDBPerformanceCounters +# ## - AzureSQLDBRequests +# ## - AzureSQLDBSchedulers +# +# ## database_type = AzureSQLManagedInstance by default collects the following queries +# ## - AzureSQLMIResourceStats +# ## - AzureSQLMIResourceGovernance +# ## - AzureSQLMIDatabaseIO +# ## - AzureSQLMIServerProperties +# ## - AzureSQLMIOsWaitstats +# ## - AzureSQLMIMemoryClerks +# ## - AzureSQLMIPerformanceCounters +# ## - AzureSQLMIRequests +# ## - AzureSQLMISchedulers +# +# ## database_type = AzureSQLPool by default collects the following queries +# ## - AzureSQLPoolResourceStats +# ## - AzureSQLPoolResourceGovernance +# ## - AzureSQLPoolDatabaseIO +# ## - AzureSQLPoolOsWaitStats, +# ## - AzureSQLPoolMemoryClerks +# ## - AzureSQLPoolPerformanceCounters +# ## - AzureSQLPoolSchedulers +# +# ## database_type = SQLServer by default collects the following queries +# ## - SQLServerPerformanceCounters +# ## - SQLServerWaitStatsCategorized +# ## - SQLServerDatabaseIO +# ## - SQLServerProperties +# ## - SQLServerMemoryClerks +# ## - SQLServerSchedulers +# ## - SQLServerRequests +# ## - SQLServerVolumeSpace +# ## - SQLServerCpu +# ## - SQLServerRecentBackups +# ## and following as optional (if mentioned in the include_query list) +# ## - SQLServerAvailabilityReplicaStates +# ## - SQLServerDatabaseReplicaStates +# +# ## Version 2 by default collects the following queries +# ## Version 2 is being deprecated, please consider using database_type. +# ## - PerformanceCounters +# ## - WaitStatsCategorized +# ## - DatabaseIO +# ## - ServerProperties +# ## - MemoryClerk +# ## - Schedulers +# ## - SqlRequests +# ## - VolumeSpace +# ## - Cpu +# +# ## Version 1 by default collects the following queries +# ## Version 1 is deprecated, please consider using database_type. +# ## - PerformanceCounters +# ## - WaitStatsCategorized +# ## - CPUHistory +# ## - DatabaseIO +# ## - DatabaseSize +# ## - DatabaseStats +# ## - DatabaseProperties +# ## - MemoryClerk +# ## - VolumeSpace +# ## - PerformanceMetrics + + +# # Statsd Server +# [[inputs.statsd]] +# ## Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp) +# protocol = "udp" +# +# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) +# max_tcp_connections = 250 +# +# ## Enable TCP keep alive probes (default=false) +# tcp_keep_alive = false +# +# ## Specifies the keep-alive period for an active network connection. +# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false. +# ## Defaults to the OS configuration. +# # tcp_keep_alive_period = "2h" +# +# ## Address and port to host UDP listener on +# service_address = ":8125" +# +# ## The following configuration options control when telegraf clears it's cache +# ## of previous values. If set to false, then telegraf will only clear it's +# ## cache when the daemon is restarted. 
+# ## Reset gauges every interval (default=true) +# delete_gauges = true +# ## Reset counters every interval (default=true) +# delete_counters = true +# ## Reset sets every interval (default=true) +# delete_sets = true +# ## Reset timings & histograms every interval (default=true) +# delete_timings = true +# +# ## Enable aggregation temporality adds temporality=delta or temporality=commulative tag, and +# ## start_time field, which adds the start time of the metric accumulation. +# ## You should use this when using OpenTelemetry output. +# # enable_aggregation_temporality = false +# +# ## Percentiles to calculate for timing & histogram stats. +# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] +# +# ## separator to use between elements of a statsd metric +# metric_separator = "_" +# +# ## Parses tags in the datadog statsd format +# ## http://docs.datadoghq.com/guides/dogstatsd/ +# ## deprecated in 1.10; use datadog_extensions option instead +# parse_data_dog_tags = false +# +# ## Parses extensions to statsd in the datadog statsd format +# ## currently supports metrics and datadog tags. +# ## http://docs.datadoghq.com/guides/dogstatsd/ +# datadog_extensions = false +# +# ## Parses distributions metric as specified in the datadog statsd format +# ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition +# datadog_distributions = false +# +# ## Statsd data translation templates, more info can be read here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md +# # templates = [ +# # "cpu.* measurement*" +# # ] +# +# ## Number of UDP messages allowed to queue up, once filled, +# ## the statsd server will start dropping packets +# allowed_pending_messages = 10000 +# +# ## Number of worker threads used to parse the incoming messages. +# # number_workers_threads = 5 +# +# ## Number of timing/histogram values to track per-measurement in the +# ## calculation of percentiles. Raising this limit increases the accuracy +# ## of percentiles but also increases the memory usage and cpu time. +# percentile_limit = 1000 +# +# ## Maximum socket buffer size in bytes, once the buffer fills up, metrics +# ## will start dropping. Defaults to the OS default. +# # read_buffer_size = 65535 +# +# ## Max duration (TTL) for each metric to stay cached/reported without being updated. +# # max_ttl = "10h" +# +# ## Sanitize name method +# ## By default, telegraf will pass names directly as they are received. +# ## However, upstream statsd now does sanitization of names which can be +# ## enabled by using the "upstream" method option. This option will a) replace +# ## white space with '_', replace '/' with '-', and remove charachters not +# ## matching 'a-zA-Z_\-0-9\.;='. +# #sanitize_name_method = "" + + +# # Suricata stats and alerts plugin +# [[inputs.suricata]] +# ## Source +# ## Data sink for Suricata stats log. This is expected to be a filename of a +# ## unix socket to be created for listening. +# # source = "/var/run/suricata-stats.sock" +# +# ## Delimiter +# ## Used for flattening field keys, e.g. subitem "alert" of "detect" becomes +# ## "detect_alert" when delimiter is "_". +# # delimiter = "_" +# +# ## Metric version +# ## Version 1 only collects stats and optionally will look for alerts if +# ## the configuration setting alerts is set to true. +# ## Version 2 parses any event type message by default and produced metrics +# ## under a single metric name using a tag to differentiate between event +# ## types. 
The timestamp for the message is applied to the generated metric. +# ## Additional tags and fields are included as well. +# # version = "1" +# +# ## Alerts +# ## In metric version 1, only status is captured by default, alerts must be +# ## turned on with this configuration option. This option does not apply for +# ## metric version 2. +# # alerts = false + + +# [[inputs.syslog]] +# ## Protocol, address and port to host the syslog receiver. +# ## If no host is specified, then localhost is used. +# ## If no port is specified, 6514 is used (RFC5425#section-4.1). +# ## ex: server = "tcp://localhost:6514" +# ## server = "udp://:6514" +# ## server = "unix:///var/run/telegraf-syslog.sock" +# ## When using tcp, consider using 'tcp4' or 'tcp6' to force the usage of IPv4 +# ## or IPV6 respectively. There are cases, where when not specified, a system +# ## may force an IPv4 mapped IPv6 address. +# server = "tcp://:6514" +# +# ## TLS Config +# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"] +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Period between keep alive probes. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# ## Only applies to stream sockets (e.g. TCP). +# # keep_alive_period = "5m" +# +# ## Maximum number of concurrent connections (default = 0). +# ## 0 means unlimited. +# ## Only applies to stream sockets (e.g. TCP). +# # max_connections = 1024 +# +# ## Read timeout is the maximum time allowed for reading a single message (default = 5s). +# ## 0 means unlimited. +# # read_timeout = "5s" +# +# ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). +# ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). +# ## Must be one of "octect-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## Whether to parse in best effort mode or not (default = false). +# ## By default best effort parsing is off. +# # best_effort = false +# +# ## The RFC standard to use for message parsing +# ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support) +# ## Must be one of "RFC5424", or "RFC3164". +# # syslog_standard = "RFC5424" +# +# ## Character to prepend to SD-PARAMs (default = "_"). +# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. +# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] +# ## For each combination a field is created. +# ## Its name is created concatenating identifier, sdparam_separator, and parameter name. +# # sdparam_separator = "_" + + +# # Parse the new lines appended to a file +# [[inputs.tail]] +# ## File names or a pattern to tail. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". 
ie: +# ## "/var/log/**.log" -> recursively find all .log files in /var/log +# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log +# ## "/var/log/apache.log" -> just tail the apache log file +# ## "/var/log/log[!1-2]* -> tail files without 1-2 +# ## "/var/log/log[^1-2]* -> identical behavior as above +# ## See https://github.com/gobwas/glob for more examples +# ## +# files = ["/var/mymetrics.out"] +# +# ## Read file from beginning. +# # from_beginning = false +# +# ## Whether file is a named pipe +# # pipe = false +# +# ## Method used to watch for file updates. Can be either "inotify" or "poll". +# # watch_method = "inotify" +# +# ## Maximum lines of the file to process that have not yet be written by the +# ## output. For best throughput set based on the number of metrics on each +# ## line and the size of the output's metric_batch_size. +# # max_undelivered_lines = 1000 +# +# ## Character encoding to use when interpreting the file contents. Invalid +# ## characters are replaced using the unicode replacement character. When set +# ## to the empty string the data is not decoded to text. +# ## ex: character_encoding = "utf-8" +# ## character_encoding = "utf-16le" +# ## character_encoding = "utf-16be" +# ## character_encoding = "" +# # character_encoding = "" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. +# # path_tag = "path" +# +# ## Filters to apply to files before generating metrics +# ## "ansi_color" removes ANSI colors +# # filters = [] +# +# ## multiline parser/codec +# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html +# #[inputs.tail.multiline] +# ## The pattern should be a regexp which matches what you believe to be an indicator that the field is part of an event consisting of multiple lines of log data. +# #pattern = "^\s" +# +# ## The field's value must be previous or next and indicates the relation to the +# ## multi-line event. +# #match_which_line = "previous" +# +# ## The invert_match can be true or false (defaults to false). +# ## If true, a message not matching the pattern will constitute a match of the multiline filter and the what will be applied. (vice-versa is also true) +# #invert_match = false +# +# ## The handling method for quoted text (defaults to 'ignore'). +# ## The following methods are available: +# ## ignore -- do not consider quotation (default) +# ## single-quotes -- consider text quoted by single quotes (') +# ## double-quotes -- consider text quoted by double quotes (") +# ## backticks -- consider text quoted by backticks (`) +# ## When handling quotes, escaped quotes (e.g. \") are handled correctly. +# #quotation = "ignore" +# +# ## The preserve_newline option can be true or false (defaults to false). +# ## If true, the newline character is preserved for multiline elements, +# ## this is useful to preserve message-structure e.g. for logging outputs. +# #preserve_newline = false +# +# #After the specified timeout, this plugin sends the multiline event even if no new pattern is found to start a new event. The default is 5s. +# #timeout = 5s + + +# ## DEPRECATED: The "tcp_listener" plugin is deprecated in version 1.3.0 and will be removed in 1.30.0, use 'inputs.socket_listener' instead. 
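+# ## As the note above suggests, a single socket_listener input can replace both
+# ## the deprecated tcp_listener and udp_listener plugins. The commented sketch
+# ## below is illustrative only: the service_address and data_format values are
+# ## placeholder assumptions and should be adapted to the actual deployment (see
+# ## the socket_listener plugin README for the full set of options).
+# # [[inputs.socket_listener]]
+# #   ## URL to listen on, e.g. "tcp://:8094", "udp://:8094" or "unix:///tmp/telegraf.sock"
+# #   service_address = "tcp://:8094"
+# #   ## Data format to consume, e.g. InfluxDB line protocol
+# #   data_format = "influx"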
+# # Generic TCP listener +# [[inputs.tcp_listener]] +# # socket_listener plugin +# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener + + +# ## DEPRECATED: The "udp_listener" plugin is deprecated in version 1.3.0 and will be removed in 1.30.0, use 'inputs.socket_listener' instead. +# # Generic UDP listener +# [[inputs.udp_listener]] +# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener + + +# # Read metrics from one or many vCenters +# [[inputs.vsphere]] +# ## List of vCenter URLs to be monitored. These three lines must be uncommented +# ## and edited for the plugin to work. +# vcenters = [ "https://vcenter.local/sdk" ] +# username = "user@corp.local" +# password = "secret" +# +# ## VMs +# ## Typical VM metrics (if omitted or empty, all metrics are collected) +# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected) +# # vm_exclude = [] # Inventory paths to exclude +# vm_metric_include = [ +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.run.summation", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.wait.summation", +# "mem.active.average", +# "mem.granted.average", +# "mem.latency.average", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.usage.average", +# "power.power.average", +# "virtualDisk.numberReadAveraged.average", +# "virtualDisk.numberWriteAveraged.average", +# "virtualDisk.read.average", +# "virtualDisk.readOIO.latest", +# "virtualDisk.throughput.usage.average", +# "virtualDisk.totalReadLatency.average", +# "virtualDisk.totalWriteLatency.average", +# "virtualDisk.write.average", +# "virtualDisk.writeOIO.latest", +# "sys.uptime.latest", +# ] +# # vm_metric_exclude = [] ## Nothing is excluded by default +# # vm_instances = true ## true by default +# +# ## Hosts +# ## Typical host metrics (if omitted or empty, all metrics are collected) +# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) +# # host_exclude [] # Inventory paths to exclude +# host_metric_include = [ +# "cpu.coreUtilization.average", +# "cpu.costop.summation", +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.swapwait.summation", +# "cpu.usage.average", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.utilization.average", +# "cpu.wait.summation", +# "disk.deviceReadLatency.average", +# "disk.deviceWriteLatency.average", +# "disk.kernelReadLatency.average", +# "disk.kernelWriteLatency.average", +# "disk.numberReadAveraged.average", +# "disk.numberWriteAveraged.average", +# "disk.read.average", +# "disk.totalReadLatency.average", +# "disk.totalWriteLatency.average", +# "disk.write.average", +# "mem.active.average", +# "mem.latency.average", +# "mem.state.latest", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.totalCapacity.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.errorsRx.summation", +# "net.errorsTx.summation", +# 
"net.usage.average", +# "power.power.average", +# "storageAdapter.numberReadAveraged.average", +# "storageAdapter.numberWriteAveraged.average", +# "storageAdapter.read.average", +# "storageAdapter.write.average", +# "sys.uptime.latest", +# ] +# ## Collect IP addresses? Valid values are "ipv4" and "ipv6" +# # ip_addresses = ["ipv6", "ipv4" ] +# +# # host_metric_exclude = [] ## Nothing excluded by default +# # host_instances = true ## true by default +# +# +# ## Clusters +# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # cluster_exclude = [] # Inventory paths to exclude +# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected +# # cluster_metric_exclude = [] ## Nothing excluded by default +# # cluster_instances = false ## false by default +# +# ## Resource Pools +# # resource_pool_include = [ "/*/host/**"] # Inventory path to resource pools to collect (by default all are collected) +# # resource_pool_exclude = [] # Inventory paths to exclude +# # resource_pool_metric_include = [] ## if omitted or empty, all metrics are collected +# # resource_pool_metric_exclude = [] ## Nothing excluded by default +# # resource_pool_instances = false ## false by default +# +# ## Datastores +# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) +# # datastore_exclude = [] # Inventory paths to exclude +# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected +# # datastore_metric_exclude = [] ## Nothing excluded by default +# # datastore_instances = false ## false by default +# +# ## Datacenters +# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # datacenter_exclude = [] # Inventory paths to exclude +# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected +# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. +# # datacenter_instances = false ## false by default +# +# ## VSAN +# # vsan_metric_include = [] ## if omitted or empty, all metrics are collected +# # vsan_metric_exclude = [ "*" ] ## vSAN are not collected by default. +# ## Whether to skip verifying vSAN metrics against the ones from GetSupportedEntityTypes API. +# # vsan_metric_skip_verify = false ## false by default. +# +# ## Interval for sampling vSAN performance metrics, can be reduced down to +# ## 30 seconds for vSAN 8 U1. +# # vsan_interval = "5m" +# +# ## Plugin Settings +# ## separator character to use for measurement and field names (default: "_") +# # separator = "_" +# +# ## number of objects to retrieve per query for realtime resources (vms and hosts) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_objects = 256 +# +# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_metrics = 256 +# +# ## number of go routines to use for collection and discovery of objects and metrics +# # collect_concurrency = 1 +# # discover_concurrency = 1 +# +# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) +# # object_discovery_interval = "300s" +# +# ## timeout applies to any of the api request made to vcenter +# # timeout = "60s" +# +# ## When set to true, all samples are sent as integers. This makes the output +# ## data types backwards compatible with Telegraf 1.9 or lower. 
Normally all +# ## samples from vCenter, with the exception of percentages, are integer +# ## values, but under some conditions, some averaging takes place internally in +# ## the plugin. Setting this flag to "false" will send values as floats to +# ## preserve the full precision when averaging takes place. +# # use_int_samples = true +# +# ## Custom attributes from vCenter can be very useful for queries in order to slice the +# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled +# ## by default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. +# ## By default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. +# # custom_attribute_include = [] +# # custom_attribute_exclude = ["*"] +# +# ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In +# ## some versions (6.7, 7.0 and possible more), certain metrics, such as cluster metrics, may be reported +# ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing +# ## it too much may cause performance issues. +# # metric_lookback = 3 +# +# ## Optional SSL Config +# # ssl_ca = "/path/to/cafile" +# # ssl_cert = "/path/to/certfile" +# # ssl_key = "/path/to/keyfile" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## The Historical Interval value must match EXACTLY the interval in the daily +# # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals +# # historical_interval = "5m" +# +# ## Specifies plugin behavior regarding disconnected servers +# ## Available choices : +# ## - error: telegraf will return an error on startup if one the servers is unreachable +# ## - ignore: telegraf will ignore unreachable servers on both startup and gather +# # disconnected_servers_behavior = "error" +# +# ## HTTP Proxy support +# # use_system_proxy = true +# # http_proxy_url = "" + + +# # A Webhooks Event collector +# [[inputs.webhooks]] +# ## Address and port to host Webhook listener on +# service_address = ":1619" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# [inputs.webhooks.filestack] +# path = "/filestack" +# +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.github] +# path = "/github" +# # secret = "" +# +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.mandrill] +# path = "/mandrill" +# +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.rollbar] +# path = "/rollbar" +# +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.papertrail] +# path = "/papertrail" +# +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.particle] +# path = "/particle" +# +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.artifactory] +# path = "/artifactory" + + +# # Input plugin to collect Windows Event Log messages +# # This plugin ONLY supports Windows +# [[inputs.win_eventlog]] 
+# ## Telegraf should have Administrator permissions to subscribe for some +# ## Windows Events channels (e.g. System log) +# +# ## LCID (Locale ID) for event rendering +# ## 1033 to force English language +# ## 0 to use default Windows locale +# # locale = 0 +# +# ## Name of eventlog, used only if xpath_query is empty +# ## Example: "Application" +# # eventlog_name = "" +# +# ## xpath_query can be in defined short form like "Event/System[EventID=999]" +# ## or you can form a XML Query. Refer to the Consuming Events article: +# ## https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events +# ## XML query is the recommended form, because it is most flexible +# ## You can create or debug XML Query by creating Custom View in Windows Event Viewer +# ## and then copying resulting XML here +# xpath_query = ''' +# +# +# +# *[System[( (EventID >= 5152 and EventID <= 5158) or EventID=5379 or EventID=4672)]] +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# ''' +# +# ## When true, event logs are read from the beginning; otherwise only future +# ## events will be logged. +# # from_beginning = false +# +# # Process UserData XML to fields, if this node exists in Event XML +# # process_userdata = true +# +# # Process EventData XML to fields, if this node exists in Event XML +# # process_eventdata = true +# +# ## Separator character to use for unrolled XML Data field names +# # separator = "_" +# +# ## Get only first line of Message field. For most events first line is +# ## usually more than enough +# # only_first_line_of_message = true +# +# ## Parse timestamp from TimeCreated.SystemTime event field. +# ## Will default to current time of telegraf processing on parsing error or if +# ## set to false +# # timestamp_from_event = true +# +# ## System field names: +# ## "Source", "EventID", "Version", "Level", "Task", "Opcode", "Keywords", +# ## "TimeCreated", "EventRecordID", "ActivityID", "RelatedActivityID", +# ## "ProcessID", "ThreadID", "ProcessName", "Channel", "Computer", "UserID", +# ## "UserName", "Message", "LevelText", "TaskText", "OpcodeText" +# ## +# ## In addition to System, Data fields can be unrolled from additional XML +# ## nodes in event. Human-readable representation of those nodes is formatted +# ## into event Message field, but XML is more machine-parsable +# +# ## Event fields to include as tags +# ## The values below are included by default. +# ## Globbing supported (e.g. "Level*" matches both "Level" and "LevelText") +# # event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] +# +# ## Event fields to include +# ## All fields are sent by default. +# ## Globbing supported (e.g. "Level*" matches both "Level" and "LevelText") +# # event_fields = ["*"] +# +# ## Event fields to exclude +# ## Note that if you exclude all fields then no metrics are produced. A valid +# ## metric includes at least one field. +# ## Globbing supported (e.g. "Level*" matches both "Level" and "LevelText") +# # exclude_fields = [] +# +# ## Event fields to exclude if their value is empty or equals to zero +# ## The values below are included by default. +# ## Globbing supported (e.g. "Level*" matches both "Level" and "LevelText") +# # exclude_empty = ["Task", "Opcode", "*ActivityID", "UserID"] + + +# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures. 
+# [[inputs.zipkin]] +# ## URL path for span data +# # path = "/api/v1/spans" +# +# ## Port on which Telegraf listens +# # port = 9411 +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response +# # write_timeout = "10s" + diff --git a/agent/internal/service/component/data-collector/telegraf/conf/telegraf.conf.bak b/agent/internal/service/component/data-collector/telegraf/conf/telegraf.conf.bak new file mode 100644 index 0000000..c54d26d --- /dev/null +++ b/agent/internal/service/component/data-collector/telegraf/conf/telegraf.conf.bak @@ -0,0 +1,12770 @@ +# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) + + +# Global tags can be specified here in key="value" format. +[global_tags] + # dc = "us-east-1" # will tag all metrics with dc=us-east-1 + # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "10s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Collection offset is used to shift the collection by the given amount. + ## This can be be used to avoid many plugins querying constraint devices + ## at the same time by manually scheduling them in time. + # collection_offset = "0s" + + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## Collected metrics are rounded to the precision specified. Precision is + ## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s). + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". 
+ ## + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s: + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + precision = "0s" + + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0h" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + + ## Pick a timezone to use when logging or type 'local' for local time. + ## Example: America/Chicago + # log_with_timezone = "" + + ## Override default hostname, if empty use os.Hostname() + hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false + + ## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which + ## translates by calling external programs snmptranslate and snmptable, + ## or "gosmi" which translates using the built-in gosmi library. + # snmp_translator = "netsnmp" + + ## Name of the file to load the state of plugins from and store the state to. + ## If uncommented and not empty, this file will be used to save the state of + ## stateful plugins on termination of Telegraf. If the file exists on start, + ## the state in the file will be restored for the plugins. + # statefile = "" + +############################################################################### +# SECRETSTORE PLUGINS # +############################################################################### + + +# # Secret-store to access Docker Secrets +# [[secretstores.docker]] +# ## Unique identifier for the secretstore. +# ## This id can later be used in plugins to reference the secrets +# ## in this secret-store via @{:} (mandatory) +# id = "docker_secretstore" +# +# ## Default Path to directory where docker stores the secrets file +# ## Current implementation in docker compose v2 only allows the following +# ## value for the path where the secrets are mounted at runtime +# # path = "/run/secrets" +# +# ## Allow dynamic secrets that are updated during runtime of telegraf +# ## Dynamic Secrets work only with `file` or `external` configuration +# ## in `secrets` section of the `docker-compose.yml` file +# # dynamic = false + + +# # Read secrets from a HTTP endpoint +# [[secretstores.http]] +# ## Unique identifier for the secret-store. 
+# ## This id can later be used in plugins to reference the secrets +# ## in this secret-store via @{:} (mandatory) +# id = "secretstore" +# +# ## URLs from which to read the secrets +# url = "http://localhost/secrets" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Optional Token for Bearer Authentication via +# ## "Authorization: Bearer " header +# # token = "your-token" +# +# ## Optional Credentials for HTTP Basic Authentication +# # username = "username" +# # password = "pa$$word" +# +# ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] +# +# ## HTTP Proxy support +# # use_system_proxy = false +# # http_proxy_url = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Minimal TLS version to accept by the client +# # tls_min_version = "TLS12" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_headers = { Content-Type = "application/json", X-MY-HEADER = "hello" } +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## When unset or set to zero the authentication will only happen once +# ## and will never renew the cookie. Set to a suitable duration if you +# ## require cookie renewal! +# # cookie_auth_renewal = "0s" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## List of success status codes +# # success_status_codes = [200] +# +# ## JSONata expression to transform the server response into a +# ## { "secret name": "secret value", ... } +# ## form. See https://jsonata.org for more information and a playground. +# # transformation = '' +# +# ## Cipher used to decrypt the secrets. +# ## In case your secrets are transmitted in an encrypted form, you need +# ## to specify the cipher used and provide the corresponding configuration. +# ## Please refer to https://github.com/influxdata/telegraf/blob/master/plugins/secretstores/http/README.md +# ## for supported values. +# # cipher = "none" +# +# ## AES cipher parameters +# # [secretstores.http.aes] +# # ## Key (hex-encoded) and initialization-vector (IV) for the decryption. +# # ## In case the key (and IV) is derived from a password, the values can +# # ## be omitted. +# # key = "" +# # init_vector = "" +# # +# # ## Parameters for password-based-key derivation. +# # ## These parameters must match the encryption side to derive the same +# # ## key on both sides! +# # # kdf_algorithm = "PBKDF2-HMAC-SHA256" +# # # password = "" +# # # salt = "" +# # # iterations = 0 + + +# # File based Javascript Object Signing and Encryption based secret-store +# [[secretstores.jose]] +# ## Unique identifier for the secret-store. +# ## This id can later be used in plugins to reference the secrets +# ## in this secret-store via @{:} (mandatory) +# id = "secretstore" +# +# ## Directory for storing the secrets +# path = "/etc/telegraf/secrets" +# +# ## Password to access the secrets. +# ## If no password is specified here, Telegraf will prompt for it at startup time. 
+# # password = "" + + +# # Secret-store to retrieve and maintain tokens from various OAuth2 services +# [[secretstores.oauth2]] +# ## Unique identifier for the secret-store. +# ## This id can later be used in plugins to reference the secrets +# ## in this secret-store via @{:} (mandatory) +# id = "secretstore" +# +# ## Service to retrieve the token(s) from +# ## Currently supported services are "custom", "auth0" and "AzureAD" +# # service = "custom" +# +# ## Setting to overwrite the queried token-endpoint +# ## This setting is optional for some serices but mandatory for others such +# ## as "custom" or "auth0". Please check the documentation at +# ## https://github.com/influxdata/telegraf/blob/master/plugins/secretstores/oauth2/README.md +# # token_endpoint = "" +# +# ## Tenant ID for the AzureAD service +# # tenant_id = "" +# +# ## Minimal remaining time until the token expires +# ## If a token expires less than the set duration in the future, the token is +# ## renewed. This is useful to avoid race-condition issues where a token is +# ## still valid, but isn't when the request reaches the API endpoint of +# ## your service using the token. +# # token_expiry_margin = "1s" +# +# ## Section for defining a token secret +# [[secretstores.oauth2.token]] +# ## Unique secret-key used for referencing the token via @{:} +# key = "" +# ## Client-ID and secret for the 2-legged OAuth flow +# client_id = "" +# client_secret = "" +# ## Scopes to send in the request +# # scopes = [] +# +# ## Additional (optional) parameters to include in the token request +# ## This might for example include the "audience" parameter required for +# ## auth0. +# # [secretstores.oauth2.token.parameters] +# # audience = "" + + +# # Operating System native secret-store +# [[secretstores.os]] +# ## Unique identifier for the secret-store. +# ## This id can later be used in plugins to reference the secrets +# ## in this secret-store via @{:} (mandatory) +# id = "secretstore" +# +# ## Keyring Name & Collection +# ## * Linux: keyring name used for the secrets, collection is unused +# ## * macOS: keyring specifies the macOS' Keychain name and collection is an +# ## optional Keychain service name +# ## * Windows: keys follow a fixed pattern in the form +# ## `::`. Please keep this in mind when +# ## creating secrets with the Windows credential tool. +# # keyring = "telegraf" +# # collection = "" +# +# ## macOS Keychain password +# ## If no password is specified here, Telegraf will prompt for it at startup +# ## time. +# # password = "" +# +# ## Allow dynamic secrets that are updated during runtime of telegraf +# # dynamic = false + + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + + +# # Configuration for sending metrics to InfluxDB 2.0 +# [[outputs.influxdb_v2]] +# ## The URLs of the InfluxDB cluster nodes. +# ## +# ## Multiple URLs can be specified for a single cluster, only ONE of the +# ## urls will be written to each interval. +# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] +# urls = ["http://127.0.0.1:8086"] +# +# ## Token for authentication. +# token = "" +# +# ## Organization is the name of the organization you wish to write to. +# organization = "" +# +# ## Destination bucket to write into. +# bucket = "" +# +# ## The value of this tag will be used to determine the bucket. If this +# ## tag is not set the 'bucket' option is used as the default. 
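+# ## For example, assuming incoming metrics carry a tag named "project" (a
+# ## hypothetical tag name), setting bucket_tag = "project" routes each metric
+# ## to the bucket named by that tag, falling back to the 'bucket' option above
+# ## when the tag is absent.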
+# # bucket_tag = "" +# +# ## If true, the bucket tag will not be added to the metric. +# # exclude_bucket_tag = false +# +# ## Timeout for HTTP messages. +# # timeout = "5s" +# +# ## Additional HTTP headers +# # http_headers = {"X-Special-Header" = "Special-Value"} +# +# ## HTTP Proxy override, if unset values the standard proxy environment +# ## variables are consulted to determine which proxy, if any, should be used. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## HTTP User-Agent +# # user_agent = "telegraf" +# +# ## Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "gzip" +# +# ## Enable or disable uint support for writing uints influxdb 2.0. +# # influx_uint_support = false +# +# ## HTTP/2 Timeouts +# ## The following values control the HTTP/2 client's timeouts. These settings +# ## are generally not required unless a user is seeing issues with client +# ## disconnects. If a user does see issues, then it is suggested to set these +# ## values to "15s" for ping timeout and "30s" for read idle timeout and +# ## retry. +# ## +# ## Note that the timer for read_idle_timeout begins at the end of the last +# ## successful write and not at the beginning of the next write. +# # ping_timeout = "0s" +# # read_idle_timeout = "0s" +# +# ## Optional TLS Config for use on HTTP connections. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Configuration for Amon Server to send metrics to. +# [[outputs.amon]] +# ## Amon Server Key +# server_key = "my-server-key" # required. +# +# ## Amon Instance URL +# amon_instance = "https://youramoninstance" # required +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Publishes metrics to an AMQP broker +# [[outputs.amqp]] +# ## Broker to publish to. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to publish to. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. +# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Maximum messages to send over a connection. Once this is reached, the +# ## connection is closed and a new connection is made. This can be helpful for +# ## load balancing when not using a dedicated load balancer. +# # max_messages = 0 +# +# ## Exchange to declare and publish to. +# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Metric tag to use as a routing key. 
+# ## ie, if this tag exists, its value will be used as the routing key +# # routing_tag = "host" +# +# ## Static routing key. Used when no routing_tag is set or as a fallback +# ## when the tag specified in routing tag is not found. +# # routing_key = "" +# # routing_key = "telegraf" +# +# ## Delivery Mode controls if a published message is persistent. +# ## One of "transient" or "persistent". +# # delivery_mode = "transient" +# +# ## InfluxDB database added as a message header. +# ## deprecated in 1.7; use the headers option +# # database = "telegraf" +# +# ## InfluxDB retention policy added as a message header +# ## deprecated in 1.7; use the headers option +# # retention_policy = "default" +# +# ## Static headers added to each published message. +# # headers = { } +# # headers = {"database" = "telegraf", "retention_policy" = "default"} +# +# ## Connection timeout. If not provided, will default to 5s. 0s means no +# ## timeout (not recommended). +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Proxy Configuration +# # use_proxy = false +# # proxy_url = "localhost:8888" +# +# ## If true use batch serialization format instead of line based delimiting. +# ## Only applies to data formats which are not line based such as JSON. +# ## Recommended to set to true. +# # use_batch_format = false +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# ## +# ## Please note that when use_batch_format = false each amqp message contains only +# ## a single metric, it is recommended to use compression with batch format +# ## for best results. +# # content_encoding = "identity" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Send metrics to Azure Application Insights +# [[outputs.application_insights]] +# ## Instrumentation key of the Application Insights resource. +# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx" +# +# ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints +# # endpoint_url = "https://dc.services.visualstudio.com/v2/track" +# +# ## Timeout for closing (default: 5s). +# # timeout = "5s" +# +# ## Enable additional diagnostic logging. +# # enable_diagnostic_logging = false +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Context Tag Sources add Application Insights context tags to a tag value. 
+# ## +# ## For list of allowed context tag keys see: +# ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go +# # [outputs.application_insights.context_tag_sources] +# # "ai.cloud.role" = "kubernetes_container_name" +# # "ai.cloud.roleInstance" = "kubernetes_pod_name" + + +# # Sends metrics to Azure Data Explorer +# [[outputs.azure_data_explorer]] +# ## The URI property of the Azure Data Explorer resource on Azure +# ## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net +# endpoint_url = "" +# +# ## The Azure Data Explorer database that the metrics will be ingested into. +# ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion. +# ## ex: "exampledatabase" +# database = "" +# +# ## Timeout for Azure Data Explorer operations +# # timeout = "20s" +# +# ## Type of metrics grouping used when pushing to Azure Data Explorer. +# ## Default is "TablePerMetric" for one table per different metric. +# ## For more information, please check the plugin README. +# # metrics_grouping_type = "TablePerMetric" +# +# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). +# # table_name = "" +# +# ## Creates tables and relevant mapping if set to true(default). +# ## Skips table and mapping creation if set to false, this is useful for running Telegraf with the lowest possible permissions i.e. table ingestor role. +# # create_tables = true +# +# ## Ingestion method to use. +# ## Available options are +# ## - managed -- streaming ingestion with fallback to batched ingestion or the "queued" method below +# ## - queued -- queue up metrics data and process sequentially +# # ingestion_type = "queued" + + +# # Send aggregate metrics to Azure Monitor +# [[outputs.azure_monitor]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Set the namespace prefix, defaults to "Telegraf/". +# # namespace_prefix = "Telegraf/" +# +# ## Azure Monitor doesn't have a string value type, so convert string +# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows +# ## a maximum of 10 dimensions so Telegraf will only send the first 10 +# ## alphanumeric dimensions. +# # strings_as_dimensions = false +# +# ## Both region and resource_id must be set or be available via the +# ## Instance Metadata service on Azure Virtual Machines. +# # +# ## Azure Region to publish metrics against. +# ## ex: region = "southcentralus" +# # region = "" +# # +# ## The Azure Resource ID against which metric will be logged, e.g. +# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" +# # resource_id = "" +# +# ## Optionally, if in Azure US Government, China, or other sovereign +# ## cloud environment, set the appropriate REST endpoint for receiving +# ## metrics. (Note: region may be unused in this context) +# # endpoint_url = "https://monitoring.core.usgovcloudapi.net" + + +# # Configuration for Google Cloud BigQuery to send entries +# [[outputs.bigquery]] +# ## Credentials File +# credentials_file = "/path/to/service/account/key.json" +# +# ## Google Cloud Platform Project +# project = "my-gcp-project" +# +# ## The namespace for the metric descriptor +# dataset = "telegraf" +# +# ## Timeout for BigQuery operations. 
+# # timeout = "5s" +# +# ## Character to replace hyphens on Metric name +# # replace_hyphen_to = "_" + + +# ## Configuration to publish Telegraf metrics to Clarify +# [[outputs.clarify]] +# ## Credentials File (Oauth 2.0 from Clarify integration) +# credentials_file = "/path/to/clarify/credentials.json" +# +# ## Clarify username password (Basic Auth from Clarify integration) +# username = "i-am-bob" +# password = "secret-password" +# +# ## Timeout for Clarify operations +# # timeout = "20s" +# +# ## Optional tags to be included when generating the unique ID for a signal in Clarify +# # id_tags = [] +# # clarify_id_tag = 'clarify_input_id' + + +# # Publish Telegraf metrics to a Google Cloud PubSub topic +# [[outputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub topic. +# project = "my-project" +# +# ## Required. Name of PubSub topic to publish metrics to. +# topic = "my-topic" +# +# ## Content encoding for message payloads, can be set to "gzip" or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. If true, will send all metrics per write in one PubSub message. +# # send_batched = true +# +# ## The following publish_* parameters specifically configures batching +# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read +# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1. +# # publish_count_threshold = 1000 +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1 +# # publish_byte_threshold = 1000000 +# +# ## Optional. Specifically configures requests made to the PubSub API. +# # publish_num_go_routines = 2 +# +# ## Optional. Specifies a timeout for requests to the PubSub API. +# # publish_timeout = "30s" +# +# ## Optional. If true, published PubSub message data will be base64-encoded. +# # base64_data = false +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Optional. PubSub attributes to add to metrics. +# # [outputs.cloud_pubsub.attributes] +# # my_attr = "tag_value" + + +# # Configuration for AWS CloudWatch output. 
+# [[outputs.cloudwatch]] +# ## Amazon REGION +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Set http_proxy +# # use_system_proxy = false +# # http_proxy_url = "http://localhost:8888" +# +# ## Namespace for the CloudWatch MetricDatums +# namespace = "InfluxData/Telegraf" +# +# ## If you have a large amount of metrics, you should consider to send statistic +# ## values instead of raw metrics which could not only improve performance but +# ## also save AWS API cost. If enable this flag, this plugin would parse the required +# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. +# ## You could use basicstats aggregator to calculate those fields. If not all statistic +# ## fields are available, all fields would still be sent as raw metrics. +# # write_statistics = false +# +# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) +# # high_resolution_metrics = false + + +# # Configuration for AWS CloudWatchLogs output. +# [[outputs.cloudwatch_logs]] +# ## The region is the Amazon region that you wish to connect to. +# ## Examples include but are not limited to: +# ## - us-west-1 +# ## - us-west-2 +# ## - us-east-1 +# ## - ap-southeast-1 +# ## - ap-southeast-2 +# ## ... +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! +# ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place +# log_group = "my-group-name" +# +# ## Log stream in log group +# ## Either log group name or reference to metric attribute, from which it can be parsed: +# ## tag: or field:. If log stream is not exist, it will be created. +# ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. 
empty log stream) +# ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) +# log_stream = "tag:location" +# +# ## Source of log data - metric name +# ## specify the name of the metric, from which the log data should be retrieved. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_metric_name = "docker_log" +# log_data_metric_name = "docker_log" +# +# ## Specify from which metric attribute the log data should be retrieved: +# ## tag: or field:. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_source = "field:message" +# log_data_source = "field:message" + + +# # Configuration for CrateDB to send metrics to. +# [[outputs.cratedb]] +# # A github.com/jackc/pgx/v4 connection string. +# # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig +# url = "postgres://user:password@localhost/schema?sslmode=disable" +# # Timeout for all CrateDB queries. +# timeout = "5s" +# # Name of the table to store metrics in. +# table = "metrics" +# # If true, and the metrics table does not exist, create it automatically. +# table_create = true +# # The character(s) to replace any '.' in an object key with +# key_separator = "_" + + +# # Configuration for DataDog API to send metrics to. +# [[outputs.datadog]] +# ## Datadog API key +# apikey = "my-secret-key" +# +# ## Connection timeout. +# # timeout = "5s" +# +# ## Write URL override; useful for debugging. +# # url = "https://app.datadoghq.com/api/v1/series" +# +# ## Set http_proxy +# # use_system_proxy = false +# # http_proxy_url = "http://localhost:8888" +# +# ## Override the default (none) compression used to send data. +# ## Supports: "zlib", "none" +# # compression = "none" + + +# # Send metrics to nowhere at all +# [[outputs.discard]] +# # no configuration + + +# # Send telegraf metrics to a Dynatrace environment +# [[outputs.dynatrace]] +# ## For usage with the Dynatrace OneAgent you can omit any configuration, +# ## the only requirement is that the OneAgent is running on the same host. +# ## Only setup environment url and token if you want to monitor a Host without the OneAgent present. +# ## +# ## Your Dynatrace environment URL. +# ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default) +# ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" +# ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest" +# url = "" +# +# ## Your Dynatrace API token. +# ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API +# ## The API token needs data ingest scope permission. When using OneAgent, no API token is required. +# api_token = "" +# +# ## Optional prefix for metric names (e.g.: "telegraf") +# prefix = "telegraf" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Optional flag for ignoring tls certificate check +# # insecure_skip_verify = false +# +# ## Connection timeout, defaults to "5s" if not set. 
+# timeout = "5s" +# +# ## If you want metrics to be treated and reported as delta counters, add the metric names here +# additional_counters = [ ] +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Optional dimensions to be added to every metric +# # [outputs.dynatrace.default_dimensions] +# # default_key = "default value" + + +# # Configuration for Elasticsearch to send metrics to. +# [[outputs.elasticsearch]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval +# urls = [ "http://node1.es.example.com:9200" ] # required. +# ## Elasticsearch client timeout, defaults to "5s" if not set. +# timeout = "5s" +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option +# enable_sniffer = false +# ## Set to true to enable gzip compression +# enable_gzip = false +# ## Set the interval to check if the Elasticsearch nodes are available +# ## Setting to "0s" will disable the health check (not recommended in production) +# health_check_interval = "10s" +# ## Set the timeout for periodic health checks. +# # health_check_timeout = "1s" +# ## HTTP basic authentication details. +# ## HTTP basic authentication details +# # username = "telegraf" +# # password = "mypassword" +# ## HTTP bearer token authentication details +# # auth_bearer_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9" +# +# ## Index Config +# ## The target index for metrics (Elasticsearch will create if it not exists). +# ## You can use the date specifiers below to create indexes per time frame. +# ## The metric timestamp will be used to decide the destination index name +# # %Y - year (2016) +# # %y - last two digits of year (00..99) +# # %m - month (01..12) +# # %d - day of month (e.g., 01) +# # %H - hour (00..23) +# # %V - week of the year (ISO week) (01..53) +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the index name. If the tag does not exist, +# ## the default tag value will be used. +# # index_name = "telegraf-{{host}}-%Y.%m.%d" +# # default_tag_value = "none" +# index_name = "telegraf-%Y.%m.%d" # required. +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Template Config +# ## Set to true if you want telegraf to manage its index template. +# ## If enabled it will create a recommended index template for telegraf indexes +# manage_template = true +# ## The template name used for telegraf indexes +# template_name = "telegraf" +# ## Set to true if you want telegraf to overwrite an existing template +# overwrite_template = false +# ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string +# ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's +# force_document_id = false +# +# ## Specifies the handling of NaN and Inf values. 
+# ## This option can have the following values: +# ## none -- do not modify field-values (default); will produce an error if NaNs or infs are encountered +# ## drop -- drop fields containing NaNs or infs +# ## replace -- replace with the value in "float_replacement_value" (default: 0.0) +# ## NaNs and inf will be replaced with the given number, -inf with the negative of that number +# # float_handling = "none" +# # float_replacement_value = 0.0 +# +# ## Pipeline Config +# ## To use a ingest pipeline, set this to the name of the pipeline you want to use. +# # use_pipeline = "my_pipeline" +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the pipeline name. If the tag does not exist, +# ## the default pipeline will be used as the pipeline. If no default pipeline is set, +# ## no pipeline is used for the metric. +# # use_pipeline = "{{es_pipeline}}" +# # default_pipeline = "my_pipeline" + + +# # Configuration for Event Hubs output plugin +# [[outputs.event_hubs]] +# ## The full connection string to the Event Hub (required) +# ## The shared access key must have "Send" permissions on the target Event Hub. +# connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" +# +# ## Client timeout (defaults to 30s) +# # timeout = "30s" +# +# ## Partition key +# ## Metric tag or field name to use for the event partition key. The value of +# ## this tag or field is set as the key for events if it exists. If both, tag +# ## and field, exist the tag is preferred. +# # partition_key = "" +# +# ## Set the maximum batch message size in bytes +# ## The allowable size depends on the Event Hub tier +# ## See: https://learn.microsoft.com/azure/event-hubs/event-hubs-quotas#basic-vs-standard-vs-premium-vs-dedicated-tiers +# ## Setting this to 0 means using the default size from the Azure Event Hubs Client library (1000000 bytes) +# # max_message_size = 1000000 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "json" + + +# # Send metrics to command as input over stdin +# [[outputs.exec]] +# ## Command to ingest metrics via stdin. +# command = ["tee", "-a", "/dev/null"] +# +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# +# ## Timeout for command to complete. +# # timeout = "5s" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Run executable as long-running output plugin +# [[outputs.execd]] +# ## One program to run as daemon. +# ## NOTE: process and each argument should each be their own string +# command = ["my-telegraf-output", "--some-flag", "value"] +# +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. 
"KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Flag to determine whether execd should throw error when part of metrics is unserializable +# ## Setting this to true will skip the unserializable metrics and process the rest of metrics +# ## Setting this to false will throw error when encountering unserializable metrics and none will be processed +# ## This setting does not apply when use_batch_format is set. +# # ignore_serialization_error = false +# +# ## Use batch serialization instead of per metric. The batch format allows for the +# ## production of batch output formats and may more efficiently encode and write metrics. +# # use_batch_format = false +# +# ## Data format to export. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf metrics to file(s) +# [[outputs.file]] +# ## Files to write to, "stdout" is a specially handled file. +# files = ["stdout", "/tmp/metrics.out"] +# +# ## Use batch serialization format instead of line based delimiting. The +# ## batch format allows for the production of non line based output formats and +# ## may more efficiently encode and write metrics. +# # use_batch_format = false +# +# ## The file will be rotated after the time interval specified. When set +# ## to 0 no time based rotation is performed. +# # rotation_interval = "0h" +# +# ## The logfile will be rotated when it becomes larger than the specified +# ## size. When set to 0 no size based rotation is performed. +# # rotation_max_size = "0MB" +# +# ## Maximum number of rotated archives to keep, any older logs are deleted. +# ## If set to -1, no archives are removed. +# # rotation_max_archives = 5 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" +# +# ## Compress output data with the specifed algorithm. +# ## If empty, compression will be disabled and files will be plain text. +# ## Supported algorithms are "zstd", "gzip" and "zlib". +# # compression_algorithm = "" +# +# ## Compression level for the algorithm above. +# ## Please note that different algorithms support different levels: +# ## zstd -- supports levels 1, 3, 7 and 11. +# ## gzip -- supports levels 0, 1 and 9. +# ## zlib -- supports levels 0, 1, and 9. +# ## By default the default compression level for each algorithm is used. +# # compression_level = -1 + + +# # Configuration for Graphite server to send metrics to +# [[outputs.graphite]] +# ## TCP endpoint for your graphite instance. +# ## If multiple endpoints are configured, the output will be load balanced. +# ## Only one of the endpoints will be written to with each iteration. +# servers = ["localhost:2003"] +# ## Prefix metrics name +# prefix = "" +# ## Graphite output template +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# template = "host.tags.measurement.field" +# +# ## Strict sanitization regex +# ## This is the default sanitization regex that is used on data passed to the +# ## graphite serializer. Users can add additional characters here if required. 
+# ## Be aware that the characters, '/' '@' '*' are always replaced with '_', +# ## '..' is replaced with '.', and '\' is removed even if added to the +# ## following regex. +# # graphite_strict_sanitize_regex = '[^a-zA-Z0-9-:._=\p{L}]' +# +# ## Enable Graphite tags support +# # graphite_tag_support = false +# +# ## Applied sanitization mode when graphite tag support is enabled. +# ## * strict - uses the regex specified above +# ## * compatible - allows for greater number of characters +# # graphite_tag_sanitize_mode = "strict" +# +# ## Character for separating metric name and field for Graphite tags +# # graphite_separator = "." +# +# ## Graphite templates patterns +# ## 1. Template for cpu +# ## 2. Template for disk* +# ## 3. Default template +# # templates = [ +# # "cpu tags.measurement.host.field", +# # "disk* measurement.field", +# # "host.measurement.tags.field" +# #] +# +# ## timeout in seconds for the write connection to graphite +# # timeout = "2s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Send telegraf metrics to graylog +# [[outputs.graylog]] +# ## Endpoints for your graylog instances. +# servers = ["udp://127.0.0.1:12201"] +# +# ## Connection timeout. +# # timeout = "5s" +# +# ## The field to use as the GELF short_message, if unset the static string +# ## "telegraf" will be used. +# ## example: short_message_field = "message" +# # short_message_field = "" +# +# ## According to GELF payload specification, additional fields names must be prefixed +# ## with an underscore. Previous versions did not prefix custom field 'name' with underscore. +# ## Set to true for backward compatibility. +# # name_field_no_prefix = false +# +# ## Connection retry options +# ## Attempt to connect to the enpoints if the initial connection fails. +# ## If 'false', Telegraf will give up after 3 connection attempt and will +# ## exit with an error. If set to 'true', the plugin will retry to connect +# ## to the unconnected endpoints infinitely. +# # connection_retry = false +# ## Time to wait between connection retry attempts. +# # connection_retry_wait_time = "15s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Send telegraf metrics to GroundWork Monitor +# [[outputs.groundwork]] +# ## URL of your groundwork instance. +# url = "https://groundwork.example.com" +# +# ## Agent uuid for GroundWork API Server. +# agent_id = "" +# +# ## Username and password to access GroundWork API. +# username = "" +# password = "" +# +# ## Default application type to use in GroundWork client +# # default_app_type = "TELEGRAF" +# +# ## Default display name for the host with services(metrics). +# # default_host = "telegraf" +# +# ## Default service state. +# # default_service_state = "SERVICE_OK" +# +# ## The name of the tag that contains the hostname. +# # resource_tag = "host" +# +# ## The name of the tag that contains the host group name. +# # group_tag = "group" + + +# # Configurable HTTP health check resource based on metrics +# [[outputs.health]] +# ## Address and port to listen on. 
+# ## ex: service_address = "http://localhost:8080"
+# ## service_address = "unix:///var/run/telegraf-health.sock"
+# # service_address = "http://:8080"
+#
+# ## The maximum duration for reading the entire request.
+# # read_timeout = "5s"
+# ## The maximum duration for writing the entire response.
+# # write_timeout = "5s"
+#
+# ## Username and password to accept for HTTP basic authentication.
+# # basic_username = "user1"
+# # basic_password = "secret"
+#
+# ## Allowed CA certificates for client certificates.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## TLS server certificate and private key.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
+# ## plugin definition, otherwise additional config options are read as part of
+# ## the table
+#
+# ## One or more check sub-tables should be defined, it is also recommended to
+# ## use metric filtering to limit the metrics that flow into this output.
+# ##
+# ## When using the default buffer sizes, this example will fail when the
+# ## metric buffer is half full.
+# ##
+# ## namepass = ["internal_write"]
+# ## tagpass = { output = ["influxdb"] }
+# ##
+# ## [[outputs.health.compares]]
+# ## field = "buffer_size"
+# ## lt = 5000.0
+# ##
+# ## [[outputs.health.contains]]
+# ## field = "buffer_size"
+
+
+# # A plugin that can transmit metrics over HTTP
+# [[outputs.http]]
+# ## URL is the address to send metrics to
+# url = "http://127.0.0.1:8080/telegraf"
+#
+# ## Timeout for HTTP message
+# # timeout = "5s"
+#
+# ## HTTP method, one of: "POST" or "PUT" or "PATCH"
+# # method = "POST"
+#
+# ## HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## OAuth2 Client Credentials Grant
+# # client_id = "clientid"
+# # client_secret = "secret"
+# # token_url = "https://identityprovider/oauth2/v1/token"
+# # audience = ""
+# # scopes = ["urn:opc:idm:__myscopes__"]
+#
+# ## Google API Auth
+# # google_application_credentials = "/etc/telegraf/example_secret.json"
+#
+# ## HTTP Proxy support
+# # use_system_proxy = false
+# # http_proxy_url = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional Cookie authentication
+# # cookie_auth_url = "https://localhost/authMe"
+# # cookie_auth_method = "POST"
+# # cookie_auth_username = "username"
+# # cookie_auth_password = "pa$$word"
+# # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}'
+# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
+# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie
+# # cookie_auth_renewal = "5m"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+#
+# ## Use batch serialization format (default) instead of line based format.
+# ## Batch format is more efficient and should be used unless line based
+# ## format is really needed.
+# # use_batch_format = true
+#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "identity" +# +# ## MaxIdleConns controls the maximum number of idle (keep-alive) +# ## connections across all hosts. Zero means no limit. +# # max_idle_conn = 0 +# +# ## MaxIdleConnsPerHost, if non-zero, controls the maximum idle +# ## (keep-alive) connections to keep per-host. If zero, +# ## DefaultMaxIdleConnsPerHost is used(2). +# # max_idle_conn_per_host = 2 +# +# ## Idle (keep-alive) connection timeout. +# ## Maximum amount of time before idle connection is closed. +# ## Zero means no limit. +# # idle_conn_timeout = 0 +# +# ## Amazon Region +# #region = "us-east-1" +# +# ## Amazon Credentials +# ## Amazon Credentials are not built unless the following aws_service +# ## setting is set to a non-empty string. It may need to match the name of +# ## the service output to as well +# #aws_service = "execute-api" +# +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Optional list of statuscodes (<200 or >300) upon which requests should not be retried +# # non_retryable_statuscodes = [409, 413] +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Additional HTTP headers +# # [outputs.http.headers] +# # ## Should be set manually to "application/json" for json data_format +# # Content-Type = "text/plain; charset=utf-8" + + +# # Configuration for sending metrics to InfluxDB +# [[outputs.influxdb]] +# ## The full HTTP or UDP URL for your InfluxDB instance. +# ## +# ## Multiple URLs can be specified for a single cluster, only ONE of the +# ## urls will be written to each interval. +# # urls = ["unix:///var/run/influxdb.sock"] +# # urls = ["udp://127.0.0.1:8089"] +# # urls = ["http://127.0.0.1:8086"] +# +# ## The target database for metrics; will be created as needed. +# ## For UDP url endpoint database needs to be configured on server side. +# # database = "telegraf" +# +# ## The value of this tag will be used to determine the database. If this +# ## tag is not set the 'database' option is used as the default. +# # database_tag = "" +# +# ## If true, the 'database_tag' will not be included in the written metric. +# # exclude_database_tag = false +# +# ## If true, no CREATE DATABASE queries will be sent. Set to true when using +# ## Telegraf with a user without permissions to create databases or when the +# ## database already exists. +# # skip_database_creation = false +# +# ## Name of existing retention policy to write to. Empty string writes to +# ## the default retention policy. Only takes effect when using HTTP. +# # retention_policy = "" +# +# ## The value of this tag will be used to determine the retention policy. If this +# ## tag is not set the 'retention_policy' option is used as the default. +# # retention_policy_tag = "" +# +# ## If true, the 'retention_policy_tag' will not be included in the written metric. 
+# # exclude_retention_policy_tag = false +# +# ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". +# ## Only takes effect when using HTTP. +# # write_consistency = "any" +# +# ## Timeout for HTTP messages. +# # timeout = "5s" +# +# ## HTTP Basic Auth +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## HTTP User-Agent +# # user_agent = "telegraf" +# +# ## UDP payload size is the maximum packet size to send. +# # udp_payload = "512B" +# +# ## Optional TLS Config for use on HTTP connections. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP Proxy override, if unset values the standard proxy environment +# ## variables are consulted to determine which proxy, if any, should be used. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## Additional HTTP headers +# # http_headers = {"X-Special-Header" = "Special-Value"} +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "gzip" +# +# ## When true, Telegraf will output unsigned integers as unsigned values, +# ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned +# ## integer values. Enabling this option will result in field type errors if +# ## existing data has been written. +# # influx_uint_support = false + + +# # Configuration for sending metrics to an Instrumental project +# [[outputs.instrumental]] +# ## Project API Token (required) +# api_token = "API Token" # required +# ## Prefix the metrics with a given name +# prefix = "" +# ## Stats output template (Graphite formatting) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# template = "host.tags.measurement.field" +# ## Timeout in seconds to connect +# timeout = "2s" +# ## Debug true - Print communication to Instrumental +# debug = false + + +# # Save metrics to an IoTDB Database +# [[outputs.iotdb]] +# ## Configuration of IoTDB server connection +# host = "127.0.0.1" +# # port = "6667" +# +# ## Configuration of authentication +# # user = "root" +# # password = "root" +# +# ## Timeout to open a new session. +# ## A value of zero means no timeout. +# # timeout = "5s" +# +# ## Configuration of type conversion for 64-bit unsigned int +# ## IoTDB currently DOES NOT support unsigned integers (version 13.x). +# ## 32-bit unsigned integers are safely converted into 64-bit signed integers by the plugin, +# ## however, this is not true for 64-bit values in general as overflows may occur. +# ## The following setting allows to specify the handling of 64-bit unsigned integers. +# ## Available values are: +# ## - "int64" -- convert to 64-bit signed integers and accept overflows +# ## - "int64_clip" -- convert to 64-bit signed integers and clip the values on overflow to 9,223,372,036,854,775,807 +# ## - "text" -- convert to the string representation of the value +# # uint64_conversion = "int64_clip" +# +# ## Configuration of TimeStamp +# ## TimeStamp is always saved in 64bits int. timestamp_precision specifies the unit of timestamp. +# ## Available value: +# ## "second", "millisecond", "microsecond", "nanosecond"(default) +# # timestamp_precision = "nanosecond" +# +# ## Handling of tags +# ## Tags are not fully supported by IoTDB. 
+# ## A guide with suggestions on how to handle tags can be found here:
+# ## https://iotdb.apache.org/UserGuide/Master/API/InfluxDB-Protocol.html
+# ##
+# ## Available values are:
+# ## - "fields" -- convert tags to fields in the measurement
+# ## - "device_id" -- attach tags to the device ID
+# ##
+# ## For example, a metric named "root.sg.device" with the tags `tag1: "private"` and `tag2: "working"` and
+# ## fields `s1: 100` and `s2: "hello"` will result in the following representations in IoTDB
+# ## - "fields" -- root.sg.device, s1=100, s2="hello", tag1="private", tag2="working"
+# ## - "device_id" -- root.sg.device.private.working, s1=100, s2="hello"
+# # convert_tags_to = "device_id"
+
+
+# # Configuration for the Kafka server to send metrics to
+# [[outputs.kafka]]
+# ## URLs of kafka brokers
+# ## The brokers listed here are used to connect to collect metadata about a
+# ## cluster. However, once the initial metadata collection is completed, telegraf
+# ## will communicate solely with the kafka leader and not all defined brokers.
+# brokers = ["localhost:9092"]
+#
+# ## Kafka topic for producer messages
+# topic = "telegraf"
+#
+# ## The value of this tag will be used as the topic. If not set the 'topic'
+# ## option is used.
+# # topic_tag = ""
+#
+# ## If true, the 'topic_tag' will be removed from the metric.
+# # exclude_topic_tag = false
+#
+# ## Optional Client id
+# # client_id = "Telegraf"
+#
+# ## Set the minimal supported Kafka version. Setting this enables the use of new
+# ## Kafka features and APIs. Of particular interest, lz4 compression
+# ## requires at least version 0.10.0.0.
+# ## ex: version = "1.1.0"
+# # version = ""
+#
+# ## The routing tag specifies a tagkey on the metric whose value is used as
+# ## the message key. The message key is used to determine which partition to
+# ## send the message to. This tag is preferred over the routing_key option.
+# routing_tag = "host"
+#
+# ## The routing key is set as the message key and used to determine which
+# ## partition to send the message to. This value is only used when no
+# ## routing_tag is set or as a fallback when the tag specified in routing tag
+# ## is not found.
+# ##
+# ## If set to "random", a random value will be generated for each message.
+# ##
+# ## When unset, no message key is added and each message is routed to a random
+# ## partition.
+# ##
+# ## ex: routing_key = "random"
+# ## routing_key = "telegraf"
+# # routing_key = ""
+#
+# ## Compression codec represents the various compression codecs recognized by
+# ## Kafka in messages.
+# ## 0 : None
+# ## 1 : Gzip
+# ## 2 : Snappy
+# ## 3 : LZ4
+# ## 4 : ZSTD
+# # compression_codec = 0
+#
+# ## Idempotent Writes
+# ## If enabled, exactly one copy of each message is written.
+# # idempotent_writes = false
+#
+# ## RequiredAcks is used in Produce Requests to tell the broker how many
+# ## replica acknowledgements it must see before responding
+# ## 0 : the producer never waits for an acknowledgement from the broker.
+# ## This option provides the lowest latency but the weakest durability
+# ## guarantees (some data will be lost when a server fails).
+# ## 1 : the producer gets an acknowledgement after the leader replica has
+# ## received the data. This option provides better durability as the
+# ## client waits until the server acknowledges the request as successful
+# ## (only messages that were written to the now-dead leader but not yet
+# ## replicated will be lost).
+# ## -1: the producer gets an acknowledgement after all in-sync replicas have +# ## received the data. This option provides the best durability, we +# ## guarantee that no messages will be lost as long as at least one in +# ## sync replica remains. +# # required_acks = -1 +# +# ## The maximum number of times to retry sending a metric before failing +# ## until the next flush. +# # max_retry = 3 +# +# ## The maximum permitted size of a message. Should be set equal to or +# ## smaller than the broker's 'message.max.bytes'. +# # max_message_bytes = 1000000 +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Defaults to the OS configuration if not specified or zero. +# # keep_alive_period = "15s" +# +# ## Optional SOCKS5 proxy to use when connecting to brokers +# # socks5_enabled = true +# # socks5_address = "127.0.0.1:1080" +# # socks5_username = "alice" +# # socks5_password = "pass123" +# +# ## Optional SASL Config +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## Access token used if sasl_mechanism is OAUTHBEARER +# # sasl_access_token = "" +# +# ## Arbitrary key value string pairs to pass as a TOML table. For example: +# # {logicalCluster = "cluster-042", poolId = "pool-027"} +# # sasl_extensions = {} +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# # Disable Kafka metadata full fetch +# # metadata_full = false +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Optional topic suffix configuration. +# ## If the section is omitted, no suffix is used. +# ## Following topic suffix methods are supported: +# ## measurement - suffix equals to separator + measurement's name +# ## tags - suffix equals to separator + specified tags' values +# ## interleaved with separator +# +# ## Suffix equals to "_" + measurement name +# # [outputs.kafka.topic_suffix] +# # method = "measurement" +# # separator = "_" +# +# ## Suffix equals to "__" + measurement's "foo" tag value. +# ## If there's no such a tag, suffix equals to an empty string +# # [outputs.kafka.topic_suffix] +# # method = "tags" +# # keys = ["foo"] +# # separator = "__" +# +# ## Suffix equals to "_" + measurement's "foo" and "bar" +# ## tag values, separated by "_". If there is no such tags, +# ## their values treated as empty strings. +# # [outputs.kafka.topic_suffix] +# # method = "tags" +# # keys = ["foo", "bar"] +# # separator = "_" + + +# # Configuration for the AWS Kinesis output. 
+# [[outputs.kinesis]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" +# +# ## debug will show upstream aws messages. +# debug = false +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## The partition key can be calculated using one of several methods: +# ## +# ## Use a static value for all writes: +# # [outputs.kinesis.partition] +# # method = "static" +# # key = "howdy" +# # +# ## Use a random partition key on each write: +# # [outputs.kinesis.partition] +# # method = "random" +# # +# ## Use the measurement name as the partition key: +# # [outputs.kinesis.partition] +# # method = "measurement" +# # +# ## Use the value of a tag for all writes, if the tag is not set the empty +# ## default option will be used. When no default, defaults to "telegraf" +# # [outputs.kinesis.partition] +# # method = "tag" +# # key = "host" +# # default = "mykey" + + +# # Configuration for Librato API to send metrics to. +# [[outputs.librato]] +# ## Librato API Docs +# ## http://dev.librato.com/v1/metrics-authentication +# ## Librato API user +# api_user = "telegraf@influxdb.com" # required. +# ## Librato API token +# api_token = "my-secret-token" # required. +# ## Debug +# # debug = false +# ## Connection timeout. +# # timeout = "5s" +# ## Output source Template (same as graphite buckets) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# ## This template is used in librato's source (not metric's name) +# template = "host" + + +# # A plugin that can send metrics over HTTPs to Logz.io +# [[outputs.logzio]] +# ## Connection timeout, defaults to "5s" if not set. +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Logz.io account token +# token = "your logz.io token" # required +# +# ## Use your listener URL for your Logz.io account region. 
+# # url = "https://listener.logz.io:8071"
+
+
+# # A plugin that can transmit logs to Loki
+# [[outputs.loki]]
+# ## The domain of Loki
+# domain = "https://loki.domain.tld"
+#
+# ## Endpoint to write api
+# # endpoint = "/loki/api/v1/push"
+#
+# ## Connection timeout, defaults to "5s" if not set.
+# # timeout = "5s"
+#
+# ## Basic auth credential
+# # username = "loki"
+# # password = "pass"
+#
+# ## Additional HTTP headers
+# # http_headers = {"X-Scope-OrgID" = "1"}
+#
+# ## If the request must be gzip encoded
+# # gzip_request = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Metric Name Label
+# ## Label to use for the metric name when sending metrics. If set to an
+# ## empty string, this will not add the label. This is NOT suggested as there
+# ## is no way to differentiate between multiple metrics.
+# # metric_name_label = "__name"
+
+
+# # A plugin that can transmit logs to mongodb
+# [[outputs.mongodb]]
+# # connection string examples for mongodb
+# dsn = "mongodb://localhost:27017"
+# # dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1"
+#
+# # overrides serverSelectionTimeoutMS in dsn if set
+# # timeout = "30s"
+#
+# # default authentication, optional
+# # authentication = "NONE"
+#
+# # for SCRAM-SHA-256 authentication
+# # authentication = "SCRAM"
+# # username = "root"
+# # password = "***"
+#
+# # for x509 certificate authentication
+# # authentication = "X509"
+# # tls_ca = "ca.pem"
+# # tls_key = "client.pem"
+# # # tls_key_pwd = "changeme" # required for encrypted tls_key
+# # insecure_skip_verify = false
+#
+# # database to store measurements and time series collections
+# # database = "telegraf"
+#
+# # granularity can be seconds, minutes, or hours.
+# # configuring this value will be based on your input collection frequency.
+# # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection
+# # granularity = "seconds"
+#
+# # optionally set a TTL to automatically expire documents from the measurement collections.
+# # ttl = "360h"
+
+
+# # Configuration for MQTT server to send metrics to
+# [[outputs.mqtt]]
+# ## MQTT Brokers
+# ## The list of brokers should only include the hostname or IP address and the
+# ## port to the broker. This should follow the format `[{scheme}://]{host}:{port}`. For
+# ## example, `localhost:1883` or `mqtt://localhost:1883`.
+# ## Scheme can be any of the following: tcp://, mqtt://, tls://, mqtts://
+# ## non-TLS and TLS servers cannot be mixed and matched.
+# servers = ["localhost:1883", ] # or ["mqtts://tls.example.com:1883"]
+#
+# ## Protocol can be `3.1.1` or `5`. Default is `3.1.1`
+# # protocol = "3.1.1"
+#
+# ## MQTT Topic for Producer Messages
+# ## MQTT outputs send metrics to this topic format:
+# ## {{ .TopicPrefix }}/{{ .Hostname }}/{{ .PluginName }}/{{ .Tag "tag_key" }}
+# ## (e.g. prefix/web01.example.com/mem/some_tag_value)
+# ## Each path segment accepts either a template placeholder, an environment variable, or a tag key
+# ## of the form `{{.Tag "tag_key_name"}}`. Empty path elements as well as special MQTT characters
+# ## (such as `+` or `#`) are invalid to form the topic name and will lead to an error.
+# ## In case a tag is missing in the metric, that path segment is omitted from the final topic.
+# topic = "telegraf/{{ .Hostname }}/{{ .PluginName }}"
+#
+# ## QoS policy for messages
+# ## The mqtt QoS policy for sending messages.
+# ## See https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# # qos = 2 +# +# ## Keep Alive +# ## Defines the maximum length of time that the broker and client may not +# ## communicate. Defaults to 0 which turns the feature off. +# ## +# ## For version v2.0.12 and later mosquitto there is a bug +# ## (see https://github.com/eclipse/mosquitto/issues/2117), which requires +# ## this to be non-zero. As a reference eclipse/paho.mqtt.golang defaults to 30. +# # keep_alive = 0 +# +# ## username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## client ID +# ## The unique client id to connect MQTT server. If this parameter is not set +# ## then a random ID is generated. +# # client_id = "" +# +# ## Timeout for write operations. default: 5s +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## When true, metrics will be sent in one MQTT message per flush. Otherwise, +# ## metrics are written one metric per MQTT message. +# ## DEPRECATED: Use layout option instead +# # batch = false +# +# ## When true, metric will have RETAIN flag set, making broker cache entries until someone +# ## actually reads it +# # retain = false +# +# ## Layout of the topics published. +# ## The following choices are available: +# ## non-batch -- send individual messages, one for each metric +# ## batch -- send all metric as a single message per MQTT topic +# ## NOTE: The following options will ignore the 'data_format' option and send single values +# ## field -- send individual messages for each field, appending its name to the metric topic +# ## homie-v4 -- send metrics with fields and tags according to the 4.0.0 specs +# ## see https://homieiot.github.io/specification/ +# # layout = "non-batch" +# +# ## HOMIE specific settings +# ## The following options provide templates for setting the device name +# ## and the node-ID for the topics. Both options are MANDATORY and can contain +# ## {{ .PluginName }} (metric name), {{ .Tag "key"}} (tag reference to 'key') +# ## or constant strings. The templays MAY NOT contain slashes! +# # homie_device_name = "" +# # homie_node_id = "" +# +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Optional MQTT 5 publish properties +# ## These setting only apply if the "protocol" property is set to 5. This must +# ## be defined at the end of the plugin settings, otherwise TOML will assume +# ## anything else is part of this table. 
For more details on publish properties +# ## see the spec: +# ## https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901109 +# # [outputs.mqtt.v5] +# # content_type = "" +# # response_topic = "" +# # message_expiry = "0s" +# # topic_alias = 0 +# # [outputs.mqtt.v5.user_properties] +# # "key1" = "value 1" +# # "key2" = "value 2" + + +# # Send telegraf measurements to NATS +# [[outputs.nats]] +# ## URLs of NATS servers +# servers = ["nats://localhost:4222"] +# +# ## Optional client name +# # name = "" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# +# ## NATS subject for producer messages +# subject = "telegraf" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send aggregated metrics to Nebius.Cloud Monitoring +# [[outputs.nebius_cloud_monitoring]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Nebius.Cloud monitoring API endpoint. Normally should not be changed +# # endpoint = "https://monitoring.api.il.nebius.cloud/monitoring/v2/data/write" + + +# # Send metrics to New Relic metrics endpoint +# [[outputs.newrelic]] +# ## The 'insights_key' parameter requires a NR license key. +# ## New Relic recommends you create one +# ## with a convenient name such as TELEGRAF_INSERT_KEY. +# ## reference: https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#ingest-license-key +# # insights_key = "New Relic License Key Here" +# +# ## Prefix to add to add to metric name for easy identification. +# ## This is very useful if your metric names are ambiguous. +# # metric_prefix = "" +# +# ## Timeout for writes to the New Relic API. +# # timeout = "15s" +# +# ## HTTP Proxy override. If unset use values from the standard +# ## proxy environment variables to determine proxy, if any. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## Metric URL override to enable geographic location endpoints. +# # If not set use values from the standard +# # metric_url = "https://metric-api.newrelic.com/metric/v1" + + +# # Send telegraf measurements to NSQD +# [[outputs.nsq]] +# ## Location of nsqd instance listening on TCP +# server = "localhost:4150" +# ## NSQ topic for producer messages +# topic = "telegraf" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send OpenTelemetry metrics over gRPC +# [[outputs.opentelemetry]] +# ## Override the default (localhost:4317) OpenTelemetry gRPC service +# ## address:port +# # service_address = "localhost:4317" +# +# ## Override the default (5s) request timeout +# # timeout = "5s" +# +# ## Optional TLS Config. +# ## +# ## Root certificates for verifying server certificates encoded in PEM format. +# # tls_ca = "/etc/telegraf/ca.pem" +# ## The public and private keypairs for the client encoded in PEM format. 
+# ## May contain intermediate certificates. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS, but skip TLS chain and host verification. +# # insecure_skip_verify = false +# ## Send the specified TLS server name via SNI. +# # tls_server_name = "foo.example.com" +# +# ## Override the default (gzip) compression used to send data. +# ## Supports: "gzip", "none" +# # compression = "gzip" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Configuration options for the Coralogix dialect +# ## Enable the following section of you use this plugin with a Coralogix endpoint +# # [outputs.opentelemetry.coralogix] +# # ## Your Coralogix private key (required). +# # ## Please note that this is sensitive data! +# # private_key = "your_coralogix_key" +# # +# # ## Application and subsystem names for the metrics (required) +# # application = "$NAMESPACE" +# # subsystem = "$HOSTNAME" +# +# ## Additional OpenTelemetry resource attributes +# # [outputs.opentelemetry.attributes] +# # "service.name" = "demo" +# +# ## Additional gRPC request metadata +# # [outputs.opentelemetry.headers] +# # key1 = "value1" + + +# # Configuration for OpenTSDB server to send metrics to +# [[outputs.opentsdb]] +# ## prefix for metrics keys +# prefix = "my.specific.prefix." +# +# ## DNS name of the OpenTSDB server +# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the +# ## telnet API. "http://opentsdb.example.com" will use the Http API. +# host = "opentsdb.example.com" +# +# ## Port of the OpenTSDB server +# port = 4242 +# +# ## Number of data points to send to OpenTSDB in Http requests. +# ## Not used with telnet API. +# http_batch_size = 50 +# +# ## URI Path for Http requests to OpenTSDB. +# ## Used in cases where OpenTSDB is located behind a reverse proxy. +# http_path = "/api/put" +# +# ## Debug true - Prints OpenTSDB communication +# debug = false +# +# ## Separator separates measurement name from field +# separator = "_" + + +# # Publishes metrics to a postgresql database +# [[outputs.postgresql]] +# ## Specify connection address via the standard libpq connection string: +# ## host=... user=... password=... sslmode=... dbname=... +# ## Or a URL: +# ## postgres://[user[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] +# ## See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING +# ## +# ## All connection parameters are optional. Environment vars are also supported. +# ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE +# ## All supported vars can be found here: +# ## https://www.postgresql.org/docs/current/libpq-envars.html +# ## +# ## Non-standard parameters: +# ## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts. +# ## pool_min_conns (default: 0) - Minimum size of connection pool. +# ## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing. +# ## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing. +# ## pool_health_check_period (default: 0s) - Duration between health checks on idle connections. +# # connection = "" +# +# ## Postgres schema to use. +# # schema = "public" +# +# ## Store tags as foreign keys in the metrics table. Default is false. +# # tags_as_foreign_keys = false +# +# ## Suffix to append to table name (measurement name) for the foreign tag table. 
+# # tag_table_suffix = "_tag" +# +# ## Deny inserting metrics if the foreign tag can't be inserted. +# # foreign_tag_constraint = false +# +# ## Store all tags as a JSONB object in a single 'tags' column. +# # tags_as_jsonb = false +# +# ## Store all fields as a JSONB object in a single 'fields' column. +# # fields_as_jsonb = false +# +# ## Name of the timestamp column +# ## NOTE: Some tools (e.g. Grafana) require the default name so be careful! +# # timestamp_column_name = "time" +# +# ## Type of the timestamp column +# ## Currently, "timestamp without time zone" and "timestamp with time zone" +# ## are supported +# # timestamp_column_type = "timestamp without time zone" +# +# ## Templated statements to execute when creating a new table. +# # create_templates = [ +# # '''CREATE TABLE {{ .table }} ({{ .columns }})''', +# # ] +# +# ## Templated statements to execute when adding columns to a table. +# ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. Points +# ## containing fields for which there is no column will have the field omitted. +# # add_column_templates = [ +# # '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''', +# # ] +# +# ## Templated statements to execute when creating a new tag table. +# # tag_table_create_templates = [ +# # '''CREATE TABLE {{ .table }} ({{ .columns }}, PRIMARY KEY (tag_id))''', +# # ] +# +# ## Templated statements to execute when adding columns to a tag table. +# ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. +# # tag_table_add_column_templates = [ +# # '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''', +# # ] +# +# ## The postgres data type to use for storing unsigned 64-bit integer values (Postgres does not have a native +# ## unsigned 64-bit integer type). +# ## The value can be one of: +# ## numeric - Uses the PostgreSQL "numeric" data type. +# ## uint8 - Requires pguint extension (https://github.com/petere/pguint) +# # uint64_type = "numeric" +# +# ## When using pool_max_conns>1, and a temporary error occurs, the query is retried with an incremental backoff. This +# ## controls the maximum backoff duration. +# # retry_max_backoff = "15s" +# +# ## Approximate number of tag IDs to store in in-memory cache (when using tags_as_foreign_keys). +# ## This is an optimization to skip inserting known tag IDs. +# ## Each entry consumes approximately 34 bytes of memory. +# # tag_cache_size = 100000 +# +# ## Enable & set the log level for the Postgres driver. +# # log_level = "warn" # trace, debug, info, warn, error, none + + +# # Configuration for the Prometheus client to spawn +# [[outputs.prometheus_client]] +# ## Address to listen on. +# listen = ":9273" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics. +# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details. +# ## Valid options: 1, 2 +# # metric_version = 1 +# +# ## Use HTTP Basic Authentication. +# # basic_username = "Foo" +# # basic_password = "Bar" +# +# ## If set, the IP Ranges which are allowed to access metrics. +# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# # ip_range = [] +# +# ## Path to publish the metrics on. 
+# # path = "/metrics" +# +# ## Expiration interval for each metric. 0 == no expiration +# # expiration_interval = "60s" +# +# ## Collectors to enable, valid entries are "gocollector" and "process". +# ## If unset, both are enabled. +# # collectors_exclude = ["gocollector", "process"] +# +# ## Send string metrics as Prometheus labels. +# ## Unless set to false all string metrics will be sent as labels. +# # string_as_label = true +# +# ## If set, enable TLS with the given certificate. +# # tls_cert = "/etc/ssl/telegraf.crt" +# # tls_key = "/etc/ssl/telegraf.key" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Export metric collection time. +# # export_timestamp = false +# +# ## Specify the metric type explicitly. +# ## This overrides the metric-type of the Telegraf metric. Globbing is allowed. +# # [outputs.prometheus_client.metric_types] +# # counter = [] +# # gauge = [] + + +# # Publishes metrics to a redis timeseries server +# [[outputs.redistimeseries]] +# ## The address of the RedisTimeSeries server. +# address = "127.0.0.1:6379" +# +# ## Redis ACL credentials +# # username = "" +# # password = "" +# # database = 0 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# # insecure_skip_verify = false + + +# # Configuration for Riemann to send metrics to +# [[outputs.riemann]] +# ## The full TCP or UDP URL of the Riemann server +# url = "tcp://localhost:5555" +# +# ## Riemann event TTL, floating-point time in seconds. +# ## Defines how long that an event is considered valid for in Riemann +# # ttl = 30.0 +# +# ## Separator to use between measurement and field name in Riemann service name +# ## This does not have any effect if 'measurement_as_attribute' is set to 'true' +# separator = "/" +# +# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name +# # measurement_as_attribute = false +# +# ## Send string metrics as Riemann event states. +# ## Unless enabled all string metrics will be ignored +# # string_as_state = false +# +# ## A list of tag keys whose values get sent as Riemann tags. +# ## If empty, all Telegraf tag values will be sent as tags +# # tag_keys = ["telegraf","custom_tag"] +# +# ## Additional Riemann tags to send. +# # tags = ["telegraf-output"] +# +# ## Description for Riemann event +# # description_text = "metrics collected from telegraf" +# +# ## Riemann client write timeout, defaults to "5s" if not set. +# # timeout = "5s" + + +# ## DEPRECATED: The "riemann_legacy" plugin is deprecated in version 1.3.0 and will be removed in 1.30.0, use 'outputs.riemann' instead (see https://github.com/influxdata/telegraf/issues/1878). +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann_legacy]] +# ## URL of server +# url = "localhost:5555" +# ## transport protocol to use either tcp or udp +# transport = "tcp" +# ## separator to use between input name and field name in Riemann service name +# separator = " " + + +# # Send aggregate metrics to Sensu Monitor +# [[outputs.sensu]] +# ## BACKEND API URL is the Sensu Backend API root URL to send metrics to +# ## (protocol, host, and port only). The output plugin will automatically +# ## append the corresponding backend API path +# ## /api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). 
+# ## +# ## Backend Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## AGENT API URL is the Sensu Agent API root URL to send metrics to +# ## (protocol, host, and port only). The output plugin will automatically +# ## append the correspeonding agent API path (/events). +# ## +# ## Agent API Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output +# ## plugin will use backend_api_url. If backend_api_url and agent_api_url are +# ## not provided, the output plugin will default to use an agent_api_url of +# ## http://127.0.0.1:3031 +# ## +# # backend_api_url = "http://127.0.0.1:8080" +# # agent_api_url = "http://127.0.0.1:3031" +# +# ## API KEY is the Sensu Backend API token +# ## Generate a new API token via: +# ## +# ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities +# ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf +# ## $ sensuctl user create telegraf --group telegraf --password REDACTED +# ## $ sensuctl api-key grant telegraf +# ## +# ## For more information on Sensu RBAC profiles & API tokens, please visit: +# ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ +# ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ +# ## +# # api_key = "${SENSU_API_KEY}" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Sensu Event details +# ## +# ## Below are the event details to be sent to Sensu. The main portions of the +# ## event are the check, entity, and metrics specifications. For more information +# ## on Sensu events and its components, please visit: +# ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events +# ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks +# ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities +# ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics +# ## +# ## Check specification +# ## The check name is the name to give the Sensu check associated with the event +# ## created. This maps to check.metatadata.name in the event. +# [outputs.sensu.check] +# name = "telegraf" +# +# ## Entity specification +# ## Configure the entity name and namespace, if necessary. This will be part of +# ## the entity.metadata in the event. +# ## +# ## NOTE: if the output plugin is configured to send events to a +# ## backend_api_url and entity_name is not set, the value returned by +# ## os.Hostname() will be used; if the output plugin is configured to send +# ## events to an agent_api_url, entity_name and entity_namespace are not used. 
+# # [outputs.sensu.entity] +# # name = "server-01" +# # namespace = "default" +# +# ## Metrics specification +# ## Configure the tags for the metrics that are sent as part of the Sensu event +# # [outputs.sensu.tags] +# # source = "telegraf" +# +# ## Configure the handler(s) for processing the provided metrics +# # [outputs.sensu.metrics] +# # handlers = ["influxdb","elasticsearch"] + + +# # Send metrics and events to SignalFx +# [[outputs.signalfx]] +# ## SignalFx Org Access Token +# access_token = "my-secret-token" +# +# ## The SignalFx realm that your organization resides in +# signalfx_realm = "us9" # Required if ingest_url is not set +# +# ## You can optionally provide a custom ingest url instead of the +# ## signalfx_realm option above if you are using a gateway or proxy +# ## instance. This option takes precident over signalfx_realm. +# ingest_url = "https://my-custom-ingest/" +# +# ## Event typed metrics are omitted by default, +# ## If you require an event typed metric you must specify the +# ## metric name in the following list. +# included_event_names = ["plugin.metric_name"] + + +# # Generic socket writer capable of handling multiple socket types. +# [[outputs.socket_writer]] +# ## URL to connect to +# # address = "tcp://127.0.0.1:8094" +# # address = "tcp://example.com:http" +# # address = "tcp4://127.0.0.1:8094" +# # address = "tcp6://127.0.0.1:8094" +# # address = "tcp6://[2001:db8::1]:8094" +# # address = "udp://127.0.0.1:8094" +# # address = "udp4://127.0.0.1:8094" +# # address = "udp6://127.0.0.1:8094" +# # address = "unix:///tmp/telegraf.sock" +# # address = "unixgram:///tmp/telegraf.sock" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## Content encoding for message payloads, can be set to "gzip" or to +# ## "identity" to apply no encoding. +# ## +# # content_encoding = "identity" +# +# ## Data format to generate. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Save metrics to an SQL Database +# [[outputs.sql]] +# ## Database driver +# ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), +# ## sqlite (SQLite3), snowflake (snowflake.com) clickhouse (ClickHouse) +# # driver = "" +# +# ## Data source name +# ## The format of the data source name is different for each database driver. +# ## See the plugin readme for details. 
+# # data_source_name = "" +# +# ## Timestamp column name +# # timestamp_column = "timestamp" +# +# ## Table creation template +# ## Available template variables: +# ## {TABLE} - table name as a quoted identifier +# ## {TABLELITERAL} - table name as a quoted string literal +# ## {COLUMNS} - column definitions (list of quoted identifiers and types) +# # table_template = "CREATE TABLE {TABLE}({COLUMNS})" +# +# ## Table existence check template +# ## Available template variables: +# ## {TABLE} - tablename as a quoted identifier +# # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" +# +# ## Initialization SQL +# # init_sql = "" +# +# ## Maximum amount of time a connection may be idle. "0s" means connections are +# ## never closed due to idle time. +# # connection_max_idle_time = "0s" +# +# ## Maximum amount of time a connection may be reused. "0s" means connections +# ## are never closed due to age. +# # connection_max_lifetime = "0s" +# +# ## Maximum number of connections in the idle connection pool. 0 means unlimited. +# # connection_max_idle = 2 +# +# ## Maximum number of open connections to the database. 0 means unlimited. +# # connection_max_open = 0 +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Metric type to SQL type conversion +# ## The values on the left are the data types Telegraf has and the values on +# ## the right are the data types Telegraf will use when sending to a database. +# ## +# ## The database values used must be data types the destination database +# ## understands. It is up to the user to ensure that the selected data type is +# ## available in the database they are using. Refer to your database +# ## documentation for what data types are available and supported. +# #[outputs.sql.convert] +# # integer = "INT" +# # real = "DOUBLE" +# # text = "TEXT" +# # timestamp = "TIMESTAMP" +# # defaultvalue = "TEXT" +# # unsigned = "UNSIGNED" +# # bool = "BOOL" +# # ## This setting controls the behavior of the unsigned value. By default the +# # ## setting will take the integer value and append the unsigned value to it. The other +# # ## option is "literal", which will use the actual value the user provides to +# # ## the unsigned option. This is useful for a database like ClickHouse where +# # ## the unsigned value should use a value like "uint64". +# # # conversion_style = "unsigned_suffix" + + +# # Configuration for Google Cloud Stackdriver to send metrics to +# [[outputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## The namespace for the metric descriptor +# ## This is optional and users are encouraged to set the namespace as a +# ## resource label instead. If omitted it is not included in the metric name. +# namespace = "telegraf" +# +# ## Metric Type Prefix +# ## The DNS name used with the metric type as a prefix. +# # metric_type_prefix = "custom.googleapis.com" +# +# ## Metric Name Format +# ## Specifies the layout of the metric name, choose from: +# ## * path: 'metric_type_prefix_namespace_name_key' +# ## * official: 'metric_type_prefix/namespace_name_key/kind' +# # metric_name_format = "path" +# +# ## Metric Data Type +# ## By default, telegraf will use whatever type the metric comes in as. +# ## However, for some use cases, forcing int64, may be preferred for values: +# ## * source: use whatever was passed in +# ## * double: preferred datatype to allow queries by PromQL. 
+# # metric_data_type = "source" +# +# ## Tags as resource labels +# ## Tags defined in this option, when they exist, are added as a resource +# ## label and not included as a metric label. The values from tags override +# ## the values defined under the resource_labels config options. +# # tags_as_resource_label = [] +# +# ## Custom resource type +# # resource_type = "generic_node" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Additional resource labels +# # [outputs.stackdriver.resource_labels] +# # node_id = "$HOSTNAME" +# # namespace = "myapp" +# # location = "eu-north0" + + +# # Configuration for active mq with stomp protocol to send metrics to +# [[outputs.stomp]] +# host = "localhost:61613" +# +# ## Queue name for producer messages +# queueName = "telegraf" +# +# ## Username and password if required by the Active MQ server. +# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Data format to output. +# data_format = "json" + + +# # A plugin that can send metrics to Sumo Logic HTTP metric collector. +# [[outputs.sumologic]] +# ## Unique URL generated for your HTTP Metrics Source. +# ## This is the address to send metrics to. +# # url = "https://events.sumologic.net/receiver/v1/http/" +# +# ## Data format to be used for sending metrics. +# ## This will set the "Content-Type" header accordingly. +# ## Currently supported formats: +# ## * graphite - for Content-Type of application/vnd.sumologic.graphite +# ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2 +# ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus +# ## +# ## More information can be found at: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics +# ## +# ## NOTE: +# ## When unset, telegraf will by default use the influx serializer which is currently unsupported +# ## in HTTP Source. +# data_format = "carbon2" +# +# ## Timeout used for HTTP request +# # timeout = "5s" +# +# ## Max HTTP request body size in bytes before compression (if applied). +# ## By default 1MB is recommended. +# ## NOTE: +# ## Bear in mind that in some serializer a metric even though serialized to multiple +# ## lines cannot be split any further so setting this very low might not work +# ## as expected. +# # max_request_body_size = 1000000 +# +# ## Additional, Sumo specific options. +# ## Full list can be found here: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers +# +# ## Desired source name. +# ## Useful if you want to override the source name configured for the source. +# # source_name = "" +# +# ## Desired host name. +# ## Useful if you want to override the source host configured for the source. +# # source_host = "" +# +# ## Desired source category. +# ## Useful if you want to override the source category configured for the source. +# # source_category = "" +# +# ## Comma-separated key=value list of dimensions to apply to every metric. +# ## Custom dimensions will allow you to query your metrics at a more granular level. 
+# # dimensions = "" + + +# # Configuration for Syslog server to send metrics to +# [[outputs.syslog]] +# ## URL to connect to +# ## ex: address = "tcp://127.0.0.1:8094" +# ## ex: address = "tcp4://127.0.0.1:8094" +# ## ex: address = "tcp6://127.0.0.1:8094" +# ## ex: address = "tcp6://[2001:db8::1]:8094" +# ## ex: address = "udp://127.0.0.1:8094" +# ## ex: address = "udp4://127.0.0.1:8094" +# ## ex: address = "udp6://127.0.0.1:8094" +# address = "tcp://127.0.0.1:8094" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## The framing technique with which it is expected that messages are +# ## transported (default = "octet-counting"). Whether the messages come +# ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must +# ## be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## SD-PARAMs settings +# ## Syslog messages can contain key/value pairs within zero or more +# ## structured data sections. For each unrecognized metric tag/field a +# ## SD-PARAMS is created. +# ## +# ## Example: +# ## [[outputs.syslog]] +# ## sdparam_separator = "_" +# ## default_sdid = "default@32473" +# ## sdids = ["foo@123", "bar@456"] +# ## +# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 +# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] +# +# ## SD-PARAMs separator between the sdid and tag/field key (default = "_") +# # sdparam_separator = "_" +# +# ## Default sdid used for tags/fields that don't contain a prefix defined in +# ## the explicit sdids setting below If no default is specified, no SD-PARAMs +# ## will be used for unrecognized field. +# # default_sdid = "default@32473" +# +# ## List of explicit prefixes to extract from tag/field keys and use as the +# ## SDID, if they match (see above example for more details): +# # sdids = ["foo@123", "bar@456"] +# +# ## Default severity value. Severity and Facility are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field +# ## with key "severity_code" is defined. If unset, 5 (notice) is the default +# # default_severity_code = 5 +# +# ## Default facility value. Facility and Severity are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with +# ## key "facility_code" is defined. If unset, 1 (user-level) is the default +# # default_facility_code = 1 +# +# ## Default APP-NAME value (RFC5424#section-6.2.5) +# ## Used when no metric tag with key "appname" is defined. +# ## If unset, "Telegraf" is the default +# # default_appname = "Telegraf" + + +# # Configuration for sending metrics to Amazon Timestream. 
+# [[outputs.timestream]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order: +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Timestream database where the metrics will be inserted. +# ## The database must exist prior to starting Telegraf. +# database_name = "yourDatabaseNameHere" +# +# ## Specifies if the plugin should describe the Timestream database upon starting +# ## to validate if it has access necessary permissions, connection, etc., as a safety check. +# ## If the describe operation fails, the plugin will not start +# ## and therefore the Telegraf agent will not start. +# describe_database_on_start = false +# +# ## Specifies how the data is organized in Timestream. +# ## Valid values are: single-table, multi-table. +# ## When mapping_mode is set to single-table, all of the data is stored in a single table. +# ## When mapping_mode is set to multi-table, the data is organized and stored in multiple tables. +# ## The default is multi-table. +# mapping_mode = "multi-table" +# +# ## Specifies if the plugin should create the table, if the table does not exist. +# create_table_if_not_exists = true +# +# ## Specifies the Timestream table magnetic store retention period in days. +# ## Check Timestream documentation for more details. +# ## NOTE: This property is valid when create_table_if_not_exists = true. +# create_table_magnetic_store_retention_period_in_days = 365 +# +# ## Specifies the Timestream table memory store retention period in hours. +# ## Check Timestream documentation for more details. +# ## NOTE: This property is valid when create_table_if_not_exists = true. +# create_table_memory_store_retention_period_in_hours = 24 +# +# ## Specifies how the data is written into Timestream. +# ## Valid values are: true, false +# ## When use_multi_measure_records is set to true, all of the tags and fields are stored +# ## as a single row in a Timestream table. +# ## When use_multi_measure_record is set to false, Timestream stores each field in a +# ## separate table row, thereby storing the tags multiple times (once for each field). +# ## The recommended setting is true. +# ## The default is false. +# use_multi_measure_records = "false" +# +# ## Specifies the measure_name to use when sending multi-measure records. +# ## NOTE: This property is valid when use_multi_measure_records=true and mapping_mode=multi-table +# measure_name_for_multi_measure_records = "telegraf_measure" +# +# ## Specifies the name of the table to write data into +# ## NOTE: This property is valid when mapping_mode=single-table. 
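+# ## e.g. single_table_name = "telegraf_metrics" (hypothetical table name)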
+# # single_table_name = "" +# +# ## Specifies the name of dimension when all of the data is being stored in a single table +# ## and the measurement name is transformed into the dimension value +# ## (see Mapping data from Influx to Timestream for details) +# ## NOTE: This property is valid when mapping_mode=single-table. +# # single_table_dimension_name_for_telegraf_measurement_name = "namespace" +# +# ## Only valid and optional if create_table_if_not_exists = true +# ## Specifies the Timestream table tags. +# ## Check Timestream documentation for more details +# # create_table_tags = { "foo" = "bar", "environment" = "dev"} +# +# ## Specify the maximum number of parallel go routines to ingest/write data +# ## If not specified, defaulted to 1 go routines +# max_write_go_routines = 25 +# +# ## Please see README.md to know how line protocol data is mapped to Timestream +# ## + + +# # Write metrics to Warp 10 +# [[outputs.warp10]] +# # Prefix to add to the measurement. +# prefix = "telegraf." +# +# # URL of the Warp 10 server +# warp_url = "http://localhost:8080" +# +# # Write token to access your app on warp 10 +# token = "Token" +# +# # Warp 10 query timeout +# # timeout = "15s" +# +# ## Print Warp 10 error body +# # print_error_body = false +# +# ## Max string error size +# # max_string_error_size = 511 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# [[outputs.wavefront]] +# ## Url for Wavefront API or Wavefront proxy instance. +# ## Direct Ingestion via Wavefront API requires authentication. See below. +# url = "https://metrics.wavefront.com" +# +# ## Maximum number of metrics to send per HTTP request. This value should be higher than the `metric_batch_size`. Default is 10,000. Values higher than 40,000 are not recommended. +# # http_maximum_batch_size = 10000 +# +# ## prefix for metrics keys +# # prefix = "my.specific.prefix." +# +# ## whether to use "value" for name of simple fields. default is false +# # simple_fields = false +# +# ## character to use between metric and field name. default is . (dot) +# # metric_separator = "." +# +# ## Convert metric name paths to use metricSeparator character +# ## When true will convert all _ (underscore) characters in final metric name. default is true +# # convert_paths = true +# +# ## Use Strict rules to sanitize metric and tag names from invalid characters +# ## When enabled forward slash (/) and comma (,) will be accepted +# # use_strict = false +# +# ## Use Regex to sanitize metric and tag names from invalid characters +# ## Regex is more thorough, but significantly slower. default is false +# # use_regex = false +# +# ## point tags to use as the source name for Wavefront (if none found, host will be used) +# # source_override = ["hostname", "address", "agent_host", "node_host"] +# +# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true +# # convert_bool = true +# +# ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any +# ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. +# # truncate_tags = false +# +# ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics +# ## normally done by the Wavefront SDK. 
This can be used if you are experiencing buffer overruns. The sending +# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in +# ## Telegraf. +# # immediate_flush = true +# +# ## Send internal metrics (starting with `~sdk.go`) for valid, invalid, and dropped metrics. default is true. +# # send_internal_metrics = true +# +# ## Optional TLS Config +# ## Set to true/false to enforce TLS being enabled/disabled. If not set, +# ## enable TLS only if any of the other options are specified. +# # tls_enable = +# ## Trusted root certificates for server +# # tls_ca = "/path/to/cafile" +# ## Used for TLS client certificate authentication +# # tls_cert = "/path/to/certfile" +# ## Used for TLS client certificate authentication +# # tls_key = "/path/to/keyfile" +# ## Send the specified TLS server name via SNI +# # tls_server_name = "kubernetes.example.com" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP Timeout +# # timeout="10s" +# +# ## Authentication for Direct Ingestion. +# ## Direct Ingestion requires one of: `token`,`auth_csp_api_token`, or `auth_csp_client_credentials` +# ## See https://docs.wavefront.com/csp_getting_started.html to learn more about using CSP credentials with Wavefront. +# ## Not required if using a Wavefront proxy. +# +# ## Wavefront API Token Authentication. Ignored if using a Wavefront proxy. +# ## 1. Click the gear icon at the top right in the Wavefront UI. +# ## 2. Click your account name (usually your email) +# ## 3. Click *API access*. +# # token = "YOUR_TOKEN" +# +# ## Optional. defaults to "https://console.cloud.vmware.com/" +# ## Ignored if using a Wavefront proxy or a Wavefront API token. +# # auth_csp_base_url=https://console.cloud.vmware.com +# +# ## CSP API Token Authentication for Wavefront. Ignored if using a Wavefront proxy. +# # auth_csp_api_token=CSP_API_TOKEN_HERE +# +# ## CSP Client Credentials Authentication Information for Wavefront. Ignored if using a Wavefront proxy. +# ## See also: https://docs.wavefront.com/csp_getting_started.html#whats-a-server-to-server-app +# # [outputs.wavefront.auth_csp_client_credentials] +# # app_id=CSP_APP_ID_HERE +# # app_secret=CSP_APP_SECRET_HERE +# # org_id=CSP_ORG_ID_HERE + + +# # A plugin that can transmit metrics over WebSocket. +# [[outputs.websocket]] +# ## URL is the address to send metrics to. Make sure ws or wss scheme is used. +# url = "ws://127.0.0.1:3000/telegraf" +# +# ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero). +# # connect_timeout = "30s" +# # write_timeout = "30s" +# # read_timeout = "30s" +# +# ## Optionally turn on using text data frames (binary by default). +# # use_text_frames = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional SOCKS5 proxy to use +# # socks5_enabled = true +# # socks5_address = "127.0.0.1:1080" +# # socks5_username = "alice" +# # socks5_password = "pass123" +# +# ## Optional HTTP proxy to use +# # use_system_proxy = false +# # http_proxy_url = "http://localhost:8888" +# +# ## Data format to output. 
+# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# ## Additional HTTP Upgrade headers +# # [outputs.websocket.headers] +# # Authorization = "Bearer " + + +# # Send aggregated metrics to Yandex.Cloud Monitoring +# [[outputs.yandex_cloud_monitoring]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Yandex.Cloud monitoring API endpoint. Normally should not be changed +# # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" +# +# ## All user metrics should be sent with "custom" service specified. Normally should not be changed +# # service = "custom" + + +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + + +# # Attach AWS EC2 metadata to metrics +# [[processors.aws_ec2]] +# ## Instance identity document tags to attach to metrics. +# ## For more information see: +# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html +# ## +# ## Available tags: +# ## * accountId +# ## * architecture +# ## * availabilityZone +# ## * billingProducts +# ## * imageId +# ## * instanceId +# ## * instanceType +# ## * kernelId +# ## * pendingTime +# ## * privateIp +# ## * ramdiskId +# ## * region +# ## * version +# imds_tags = [] +# +# ## EC2 instance tags retrieved with DescribeTags action. +# ## In case tag is empty upon retrieval it's omitted when tagging metrics. +# ## Note that in order for this to work, role attached to EC2 instance or AWS +# ## credentials available from the environment must have a policy attached, that +# ## allows ec2:DescribeTags. +# ## +# ## For more information see: +# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html +# ec2_tags = [] +# +# ## Timeout for http requests made by against aws ec2 metadata endpoint. +# timeout = "10s" +# +# ## ordered controls whether or not the metrics need to stay in the same order +# ## this plugin received them in. If false, this plugin will change the order +# ## with requests hitting cached results moving through immediately and not +# ## waiting on slower lookups. This may cause issues for you if you are +# ## depending on the order of metrics staying the same. If so, set this to true. +# ## Keeping the metrics ordered may be slightly slower. +# ordered = false +# +# ## max_parallel_calls is the maximum number of AWS API calls to be in flight +# ## at the same time. +# ## It's probably best to keep this number fairly low. +# max_parallel_calls = 10 +# +# ## cache_ttl determines how long each cached item will remain in the cache before +# ## it is removed and subsequently needs to be queried for from the AWS API. By +# ## default, no items are cached. +# # cache_ttl = "0s" +# +# ## tag_cache_size determines how many of the values which are found in imds_tags +# ## or ec2_tags will be kept in memory for faster lookup on successive processing +# ## of metrics. You may want to adjust this if you have excessively large numbers +# ## of tags on your EC2 instances, and you are using the ec2_tags field. 
This +# ## typically does not need to be changed when using the imds_tags field. +# # tag_cache_size = 1000 +# +# ## log_cache_stats will emit a log line periodically to stdout with details of +# ## cache entries, hits, misses, and evacuations since the last time stats were +# ## emitted. This can be helpful in determining whether caching is being effective +# ## in your environment. Stats are emitted every 30 seconds. By default, this +# ## setting is disabled. + + +# # Apply metric modifications using override semantics. +# [[processors.clone]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.clone.tags] +# # additional_tag = "tag_value" + + +# # Convert values to another metric value type +# [[processors.converter]] +# ## Tags to convert +# ## +# ## The table key determines the target type, and the array of key-values +# ## select the keys to convert. The array may contain globs. +# ## = [...] +# [processors.converter.tags] +# measurement = [] +# string = [] +# integer = [] +# unsigned = [] +# boolean = [] +# float = [] +# +# ## Optional tag to use as metric timestamp +# # timestamp = [] +# +# ## Format of the timestamp determined by the tag above. This can be any of +# ## "unix", "unix_ms", "unix_us", "unix_ns", or a valid Golang time format. +# ## It is required, when using the timestamp option. +# # timestamp_format = "" +# +# ## Fields to convert +# ## +# ## The table key determines the target type, and the array of key-values +# ## select the keys to convert. The array may contain globs. +# ## = [...] +# [processors.converter.fields] +# measurement = [] +# tag = [] +# string = [] +# integer = [] +# unsigned = [] +# boolean = [] +# float = [] +# +# ## Optional field to use as metric timestamp +# # timestamp = [] +# +# ## Format of the timestamp determined by the field above. This can be any +# ## of "unix", "unix_ms", "unix_us", "unix_ns", or a valid Golang time +# ## format. It is required, when using the timestamp option. +# # timestamp_format = "" + + +# # Dates measurements, tags, and fields that pass through this filter. +# [[processors.date]] +# ## New tag to create +# tag_key = "month" +# +# ## New field to create (cannot set both field_key and tag_key) +# # field_key = "month" +# +# ## Date format string, must be a representation of the Go "reference time" +# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". +# date_format = "Jan" +# +# ## If destination is a field, date format can also be one of +# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. +# # date_format = "unix" +# +# ## Offset duration added to the date string when writing the new tag. +# # date_offset = "0s" +# +# ## Timezone to use when creating the tag or field using a reference time +# ## string. This can be set to one of "UTC", "Local", or to a location name +# ## in the IANA Time Zone database. +# ## example: timezone = "America/Los_Angeles" +# # timezone = "UTC" + + +# # Filter metrics with repeating field values +# [[processors.dedup]] +# ## Maximum time to suppress output +# dedup_interval = "600s" + + +# ## Set default fields on your metric(s) when they are nil or empty +# [[processors.defaults]] +# ## Ensures a set of fields always exists on your metric(s) with their +# ## respective default value. 
+# ## For any given field pair (key = default), if it's not set, a field +# ## is set on the metric with the specified default. +# ## +# ## A field is considered not set if it is nil on the incoming metric; +# ## or it is not nil but its value is an empty string or is a string +# ## of one or more spaces. +# ## = +# [processors.defaults.fields] +# field_1 = "bar" +# time_idle = 0 +# is_error = true + + +# # Map enum values according to given table. +# [[processors.enum]] +# [[processors.enum.mapping]] +# ## Name of the field to map. Globs accepted. +# field = "status" +# +# ## Name of the tag to map. Globs accepted. +# # tag = "status" +# +# ## Destination tag or field to be used for the mapped value. By default the +# ## source tag or field is used, overwriting the original value. +# dest = "status_code" +# +# ## Default value to be used for all values not contained in the mapping +# ## table. When unset and no match is found, the original field will remain +# ## unmodified and the destination tag or field will not be created. +# # default = 0 +# +# ## Table of mappings +# [processors.enum.mapping.value_mappings] +# green = 1 +# amber = 2 +# red = 3 + + +# # Run executable as long-running processor plugin +# [[processors.execd]] +# ## One program to run as daemon. +# ## NOTE: process and each argument should each be their own string +# ## eg: command = ["/path/to/your_program", "arg1", "arg2"] +# command = ["cat"] +# +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# +# ## Delay before the process is restarted after an unexpected termination +# # restart_delay = "10s" +# +# ## Serialization format for communicating with the executed program +# ## Please note that the corresponding data-format must exist both in +# ## parsers and serializers +# # data_format = "influx" + + +# # Performs file path manipulations on tags and fields +# [[processors.filepath]] +# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag +# # [[processors.filepath.basename]] +# # tag = "path" +# # dest = "basepath" +# +# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory +# # [[processors.filepath.dirname]] +# # field = "path" +# +# ## Treat the tag value as a path, converting it to its the last element without its suffix +# # [[processors.filepath.stem]] +# # tag = "path" +# +# ## Treat the tag value as a path, converting it to the shortest path name equivalent +# ## to path by purely lexical processing +# # [[processors.filepath.clean]] +# # tag = "path" +# +# ## Treat the tag value as a path, converting it to a relative path that is lexically +# ## equivalent to the source path when joined to 'base_path' +# # [[processors.filepath.rel]] +# # tag = "path" +# # base_path = "/var/log" +# +# ## Treat the tag value as a path, replacing each separator character in path with a '/' character. 
Has only
+# ## effect on Windows
+# # [[processors.filepath.toslash]]
+# # tag = "path"
+
+
+# # Add a tag of the network interface name looked up over SNMP by interface number
+# [[processors.ifname]]
+# ## Name of tag holding the interface number
+# # tag = "ifIndex"
+#
+# ## Name of output tag where service name will be added
+# # dest = "ifName"
+#
+# ## Name of tag of the SNMP agent to request the interface name from
+# # agent = "agent"
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
+#
+# ## SNMP community string.
+# # community = "public"
+#
+# ## Number of retries to attempt.
+# # retries = 3
+#
+# ## The GETBULK max-repetitions parameter.
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Context Name.
+# # context_name = ""
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+#
+# ## max_parallel_lookups is the maximum number of SNMP requests to
+# ## make at the same time.
+# # max_parallel_lookups = 100
+#
+# ## ordered controls whether or not the metrics need to stay in the
+# ## same order this plugin received them in. If false, this plugin
+# ## may change the order when data is cached. If you need metrics to
+# ## stay in order, set this to true. Keeping the metrics ordered may
+# ## be slightly slower
+# # ordered = false
+#
+# ## cache_ttl is the amount of time interface names are cached for a
+# ## given agent. After this period elapses if names are needed they
+# ## will be retrieved again.
+# # cache_ttl = "8h"
+
+
+# # Lookup a key derived from metrics in a static file
+# [[processors.lookup]]
+# ## List of files containing the lookup-table
+# files = ["path/to/lut.json", "path/to/another_lut.json"]
+#
+# ## Format of the lookup file(s)
+# ## Available formats are:
+# ## json -- JSON file with 'key: {tag-key: tag-value, ...}' mapping
+# ## csv_key_name_value -- CSV file with 'key,tag-key,tag-value,...,tag-key,tag-value' mapping
+# ## csv_key_values -- CSV file with a header containing tag-names and
+# ## rows with 'key,tag-value,...,tag-value' mappings
+# # format = "json"
+#
+# ## Template for generating the lookup-key from the metric.
+# ## This is a Golang template (see https://pkg.go.dev/text/template) to
+# ## access the metric name (`{{.Name}}`), a tag value (`{{.Tag "name"}}`) or
+# ## a field value (`{{.Field "name"}}`).
+# key = '{{.Tag "host"}}'
+
+
+# # Adds noise to numerical fields
+# [[processors.noise]]
+# ## Specifies the type of the random distribution.
+# ## Can be "laplacian", "gaussian" or "uniform".
+# # type = "laplacian"
+#
+# ## Center of the distribution.
+# ## Only used for Laplacian and Gaussian distributions.
+# # mu = 0.0
+#
+# ## Scale parameter for the Laplacian or Gaussian distribution
+# # scale = 1.0
+#
+# ## Upper and lower bound of the Uniform distribution
+# # min = -1.0
+# # max = 1.0
+#
+# ## Apply the noise only to numeric fields matching the filter criteria below.
+# ## Excludes takes precedence over includes. 
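+# ## e.g. include_fields = ["temperature*"] and exclude_fields = ["uptime"]
+# ## (illustrative field names only)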
+# # include_fields = [] +# # exclude_fields = [] + + +# # Apply metric modifications using override semantics. +# [[processors.override]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.override.tags] +# # additional_tag = "tag_value" + + +# # Parse a value in a specified field(s)/tag(s) and add the result in a new metric +# [[processors.parser]] +# ## The name of the fields whose value will be parsed. +# parse_fields = ["message"] +# +# ## The name of the tags whose value will be parsed. +# # parse_tags = [] +# +# ## If true, incoming metrics are not emitted. +# # drop_original = false +# +# ## Merge Behavior +# ## Only has effect when drop_original is set to false. Possible options +# ## include: +# ## * override: emitted metrics are merged by overriding the original metric +# ## using the newly parsed metrics, but retains the original metric +# ## timestamp. +# ## * override-with-timestamp: the same as "override", but the timestamp is +# ## set based on the new metrics if present. +# # merge = "" +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Rotate a single valued metric into a multi field metric +# [[processors.pivot]] +# ## Tag to use for naming the new field. +# tag_key = "name" +# ## Field to use as the value of the new field. +# value_key = "value" + + +# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file +# [[processors.port_name]] +# ## Name of tag holding the port number +# # tag = "port" +# ## Or name of the field holding the port number +# # field = "port" +# +# ## Name of output tag or field (depending on the source) where service name will be added +# # dest = "service" +# +# ## Default tcp or udp +# # default_protocol = "tcp" +# +# ## Tag containing the protocol (tcp or udp, case-insensitive) +# # protocol_tag = "proto" +# +# ## Field containing the protocol (tcp or udp, case-insensitive) +# # protocol_field = "proto" + + +# # Print all metrics that pass through this filter. +# [[processors.printer]] + + +# # Transforms tag and field values as well as measurement, tag and field names with regex pattern +# [[processors.regex]] +# namepass = ["nginx_requests"] +# +# # Tag and field conversions defined in a separate sub-tables +# [[processors.regex.tags]] +# ## Tag to change, "*" will change every tag +# key = "resp_code" +# ## Regular expression to match on a tag value +# pattern = "^(\\d)\\d\\d$" +# ## Matches of the pattern will be replaced with this string. Use ${1} +# ## notation to use the text of the first submatch. 
+# replacement = "${1}xx" +# +# [[processors.regex.fields]] +# ## Field to change +# key = "request" +# ## All the power of the Go regular expressions available here +# ## For example, named subgroups +# pattern = "^/api(?P/[\\w/]+)\\S*" +# replacement = "${method}" +# ## If result_key is present, a new field will be created +# ## instead of changing existing field +# result_key = "method" +# +# # Multiple conversions may be applied for one field sequentially +# # Let's extract one more value +# [[processors.regex.fields]] +# key = "request" +# pattern = ".*category=(\\w+).*" +# replacement = "${1}" +# result_key = "search_category" +# +# # Rename metric fields +# [[processors.regex.field_rename]] +# ## Regular expression to match on a field name +# pattern = "^search_(\\w+)d$" +# ## Matches of the pattern will be replaced with this string. Use ${1} +# ## notation to use the text of the first submatch. +# replacement = "${1}" +# ## If the new field name already exists, you can either "overwrite" the +# ## existing one with the value of the renamed field OR you can "keep" +# ## both the existing and source field. +# # result_key = "keep" +# +# # Rename metric tags +# # [[processors.regex.tag_rename]] +# # ## Regular expression to match on a tag name +# # pattern = "^search_(\\w+)d$" +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. +# # replacement = "${1}" +# # ## If the new tag name already exists, you can either "overwrite" the +# # ## existing one with the value of the renamed tag OR you can "keep" +# # ## both the existing and source tag. +# # # result_key = "keep" +# +# # Rename metrics +# # [[processors.regex.metric_rename]] +# # ## Regular expression to match on an metric name +# # pattern = "^search_(\\w+)d$" +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. +# # replacement = "${1}" + + +# # Rename measurements, tags, and fields that pass through this filter. +# [[processors.rename]] +# ## Specify one sub-table per rename operation. +# [[processors.rename.replace]] +# measurement = "network_interface_throughput" +# dest = "throughput" +# +# [[processors.rename.replace]] +# tag = "hostname" +# dest = "host" +# +# [[processors.rename.replace]] +# field = "lower" +# dest = "min" +# +# [[processors.rename.replace]] +# field = "upper" +# dest = "max" + + +# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name +# [[processors.reverse_dns]] +# ## For optimal performance, you may want to limit which metrics are passed to this +# ## processor. eg: +# ## namepass = ["my_metric_*"] +# +# ## cache_ttl is how long the dns entries should stay cached for. +# ## generally longer is better, but if you expect a large number of diverse lookups +# ## you'll want to consider memory use. +# cache_ttl = "24h" +# +# ## lookup_timeout is how long should you wait for a single dns request to repsond. +# ## this is also the maximum acceptable latency for a metric travelling through +# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will +# ## be passed on unaltered. +# ## multiple simultaneous resolution requests for the same IP will only make a +# ## single rDNS request, and they will all wait for the answer for this long. +# lookup_timeout = "3s" +# +# ## max_parallel_lookups is the maximum number of dns requests to be in flight +# ## at the same time. 
Requests hitting cached values do not count against this
+# ## total, and neither do multiple requests for the same IP.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_lookups = 10
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## Keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the field "source_ip", and put the result in the field "source_name"
+# field = "source_ip"
+# dest = "source_name"
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the tag "destination_ip", and put the result in the tag
+# ## "destination_name".
+# tag = "destination_ip"
+# dest = "destination_name"
+#
+# ## If you would prefer destination_name to be a field instead, you can use a
+# ## processors.converter after this one, specifying the order attribute.
+
+
+# # Add the S2 Cell ID as a tag based on latitude and longitude fields
+# [[processors.s2geo]]
+# ## The name of the lat and lon fields containing WGS-84 latitude and
+# ## longitude in decimal degrees.
+# # lat_field = "lat"
+# # lon_field = "lon"
+#
+# ## New tag to create
+# # tag_key = "s2_cell_id"
+#
+# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
+# # cell_level = 9
+
+
+# # Scale values with a predefined range to a different output range.
+# [[processors.scale]]
+# ## It is possible to define multiple different scalings that can be applied
+# ## to different sets of fields. Each scaling expects the following
+# ## arguments:
+# ## - input_minimum: Minimum expected input value
+# ## - input_maximum: Maximum expected input value
+# ## - output_minimum: Minimum desired output value
+# ## - output_maximum: Maximum desired output value
+# ## alternatively you can specify a scaling with factor and offset
+# ## - factor: factor to scale the input value with
+# ## - offset: additive offset for value after scaling
+# ## - fields: a list of field names (or filters) to apply this scaling to
+#
+# ## Example: Scaling with minimum and maximum values
+# # [processors.scale.scaling]
+# # input_minimum = 0
+# # input_maximum = 1
+# # output_minimum = 0
+# # output_maximum = 100
+# # fields = ["temperature1", "temperature2"]
+#
+# ## Example: Scaling with factor and offset
+# # [processors.scale.scaling]
+# # factor = 10.0
+# # offset = -5.0
+# # fields = ["voltage*"]
+
+
+# # Split a metric into one or more metrics with the specified field(s)/tag(s)
+# [[processors.split]]
+# ## Keeps the original metric by default
+# # drop_original = false
+#
+# ## Template for an output metric
+# ## Users can define multiple templates to split the original metric into
+# ## multiple, potentially overlapping, metrics.
+# [[processors.split.template]]
+# ## New metric name
+# name = ""
+#
+# ## List of tag keys for this metric template, accepts globs, e.g. "*"
+# tags = []
+#
+# ## List of field keys for this metric template, accepts globs, e.g. "*"
+# fields = []
+
+
+# # Process metrics using a Starlark script
+# [[processors.starlark]]
+# ## The Starlark source can be set as a string in this configuration file, or
+# ## by referencing a file containing the script. 
Only one source or script +# ## should be set at once. +# +# ## Source of the Starlark script. +# source = ''' +# def apply(metric): +# return metric +# ''' +# +# ## File containing a Starlark script. +# # script = "/usr/local/bin/myscript.star" +# +# ## The constants of the Starlark script. +# # [processors.starlark.constants] +# # max_size = 10 +# # threshold = 0.75 +# # default_name = "Julia" +# # debug_mode = true + + +# # Perform string processing on tags, fields, and measurements +# [[processors.strings]] +# ## Convert a field value to lowercase and store in a new field +# # [[processors.strings.lowercase]] +# # field = "uri_stem" +# # dest = "uri_stem_normalised" +# +# ## Convert a tag value to uppercase +# # [[processors.strings.uppercase]] +# # tag = "method" +# +# ## Convert a field value to titlecase +# # [[processors.strings.titlecase]] +# # field = "status" +# +# ## Trim leading and trailing whitespace using the default cutset +# # [[processors.strings.trim]] +# # field = "message" +# +# ## Trim leading characters in cutset +# # [[processors.strings.trim_left]] +# # field = "message" +# # cutset = "\t" +# +# ## Trim trailing characters in cutset +# # [[processors.strings.trim_right]] +# # field = "message" +# # cutset = "\r\n" +# +# ## Trim the given prefix from the field +# # [[processors.strings.trim_prefix]] +# # field = "my_value" +# # prefix = "my_" +# +# ## Trim the given suffix from the field +# # [[processors.strings.trim_suffix]] +# # field = "read_count" +# # suffix = "_count" +# +# ## Replace all non-overlapping instances of old with new +# # [[processors.strings.replace]] +# # measurement = "*" +# # old = ":" +# # new = "_" +# +# ## Trims strings based on width +# # [[processors.strings.left]] +# # field = "message" +# # width = 10 +# +# ## Decode a base64 encoded utf-8 string +# # [[processors.strings.base64decode]] +# # field = "message" +# +# ## Sanitize a string to ensure it is a valid utf-8 string +# ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty +# # [[processors.strings.valid_utf8]] +# # field = "message" +# # replacement = "" + + +# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit. +# [[processors.tag_limit]] +# ## Maximum number of tags to preserve +# limit = 3 +# +# ## List of tags to preferentially preserve +# keep = ["environment", "region"] + + +# # Uses a Go template to create a new tag +# [[processors.template]] +# ## Go template used to create the tag name of the output. In order to +# ## ease TOML escaping requirements, you should use single quotes around +# ## the template string. +# tag = "topic" +# +# ## Go template used to create the tag value of the output. In order to +# ## ease TOML escaping requirements, you should use single quotes around +# ## the template string. +# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' + + +# # Print all metrics that pass through this filter. +# [[processors.topk]] +# ## How many seconds between aggregations +# # period = 10 +# +# ## How many top buckets to return per field +# ## Every field specified to aggregate over will return k number of results. +# ## For example, 1 field with k of 10 will return 10 buckets. While 2 fields +# ## with k of 3 will return 6 buckets. +# # k = 10 +# +# ## Over which tags should the aggregation be done. Globs can be specified, in +# ## which case any tag matching the glob will aggregated over. 
If set to an +# ## empty list is no aggregation over tags is done +# # group_by = ['*'] +# +# ## The field(s) to aggregate +# ## Each field defined is used to create an independent aggregation. Each +# ## aggregation will return k buckets. If a metric does not have a defined +# ## field the metric will be dropped from the aggregation. Considering using +# ## the defaults processor plugin to ensure fields are set if required. +# # fields = ["value"] +# +# ## What aggregation function to use. Options: sum, mean, min, max +# # aggregation = "mean" +# +# ## Instead of the top k largest metrics, return the bottom k lowest metrics +# # bottomk = false +# +# ## The plugin assigns each metric a GroupBy tag generated from its name and +# ## tags. If this setting is different than "" the plugin will add a +# ## tag (which name will be the value of this setting) to each metric with +# ## the value of the calculated GroupBy tag. Useful for debugging +# # add_groupby_tag = "" +# +# ## These settings provide a way to know the position of each metric in +# ## the top k. The 'add_rank_field' setting allows to specify for which +# ## fields the position is required. If the list is non empty, then a field +# ## will be added to each and every metric for each string present in this +# ## setting. This field will contain the ranking of the group that +# ## the metric belonged to when aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_rank' +# # add_rank_fields = [] +# +# ## These settings provide a way to know what values the plugin is generating +# ## when aggregating metrics. The 'add_aggregate_field' setting allows to +# ## specify for which fields the final aggregation value is required. If the +# ## list is non empty, then a field will be added to each every metric for +# ## each field present in this setting. This field will contain +# ## the computed aggregation for the group that the metric belonged to when +# ## aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_aggregate' +# # add_aggregate_fields = [] + + +# # Rotate multi field metric into several single field metrics +# [[processors.unpivot]] +# ## Metric mode to pivot to +# ## Set to "tag", metrics are pivoted as a tag and the metric is kept as +# ## the original measurement name. Tag key name is set by tag_key value. +# ## Set to "metric" creates a new metric named the field name. With this +# ## option the tag_key is ignored. Be aware that this could lead to metric +# ## name conflicts! +# # use_fieldname_as = "tag" +# +# ## Tag to use for the name. +# # tag_key = "name" +# +# ## Field to use for the name of the value. +# # value_key = "value" + + +############################################################################### +# AGGREGATOR PLUGINS # +############################################################################### + + +# # Keep the aggregate basicstats of each metric passing through. +# [[aggregators.basicstats]] +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. 
+# drop_original = false +# +# ## Configures which basic stats to push as fields +# # stats = ["count","diff","rate","min","max","mean","non_negative_diff","non_negative_rate","percent_change","stdev","s2","sum","interval"] + + +# # Calculates a derivative for every field. +# [[aggregators.derivative]] +# ## The period in which to flush the aggregator. +# period = "30s" +# ## +# ## Suffix to append for the resulting derivative field. +# # suffix = "_rate" +# ## +# ## Field to use for the quotient when computing the derivative. +# ## When using a field as the derivation parameter the name of that field will +# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*. +# ## By default the timestamps of the metrics are used and the suffix is omitted. +# # variable = "" +# ## +# ## Maximum number of roll-overs in case only one measurement is found during a period. +# # max_roll_over = 10 + + +# # Report the final metric of a series +# [[aggregators.final]] +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## The time that a series is not updated until considering it final. +# series_timeout = "5m" + + +# # Configuration for aggregate histogram metrics +# [[aggregators.histogram]] +# ## The period in which to flush the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## If true, the histogram will be reset on flush instead +# ## of accumulating the results. +# reset = false +# +# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. +# ## Defaults to true. +# cumulative = true +# +# ## Expiration interval for each histogram. The histogram will be expired if +# ## there are no changes in any buckets for this time interval. 0 == no expiration. +# # expiration_interval = "0m" +# +# ## If true, aggregated histogram are pushed to output only if it was updated since +# ## previous push. Defaults to false. +# # push_only_on_update = false +# +# ## Example config that aggregates all fields of the metric. +# # [[aggregators.histogram.config]] +# # ## Right borders of buckets (with +Inf implicitly added). +# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] +# # ## The name of metric. +# # measurement_name = "cpu" +# +# ## Example config that aggregates only specific fields of the metric. +# # [[aggregators.histogram.config]] +# # ## Right borders of buckets (with +Inf implicitly added). +# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] +# # ## The name of metric. +# # measurement_name = "diskio" +# # ## The concrete fields of metric +# # fields = ["io_time", "read_time", "write_time"] + + +# # Merge metrics into multifield metrics by series key +# [[aggregators.merge]] +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = true + + +# # Keep the aggregate min/max of each metric passing through. +# [[aggregators.minmax]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. 
+# drop_original = false + + +# # Keep the aggregate quantiles of each metric passing through. +# [[aggregators.quantile]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## Quantiles to output in the range [0,1] +# # quantiles = [0.25, 0.5, 0.75] +# +# ## Type of aggregation algorithm +# ## Supported are: +# ## "t-digest" -- approximation using centroids, can cope with large number of samples +# ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7) +# ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8) +# ## NOTE: Do not use "exact" algorithms with large number of samples +# ## to not impair performance or memory consumption! +# # algorithm = "t-digest" +# +# ## Compression for approximation (t-digest). The value needs to be +# ## greater or equal to 1.0. Smaller values will result in more +# ## performance but less accuracy. +# # compression = 100.0 + + +# # Aggregate metrics using a Starlark script +# [[aggregators.starlark]] +# ## The Starlark source can be set as a string in this configuration file, or +# ## by referencing a file containing the script. Only one source or script +# ## should be set at once. +# ## +# ## Source of the Starlark script. +# source = ''' +# state = {} +# +# def add(metric): +# state["last"] = metric +# +# def push(): +# return state.get("last") +# +# def reset(): +# state.clear() +# ''' +# +# ## File containing a Starlark script. +# # script = "/usr/local/bin/myscript.star" +# +# ## The constants of the Starlark script. +# # [aggregators.starlark.constants] +# # max_size = 10 +# # threshold = 0.75 +# # default_name = "Julia" +# # debug_mode = true + + +# # Count the occurrence of values in fields. +# [[aggregators.valuecounter]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# ## The fields for which the values will be counted +# fields = ["status"] + + +############################################################################### +# INPUT PLUGINS # +############################################################################### + + +# Read metrics about cpu usage +[[inputs.cpu]] + ## Whether to report per-cpu stats or not + percpu = true + ## Whether to report total system cpu stats or not + totalcpu = true + ## If true, collect raw CPU time metrics + collect_cpu_time = false + ## If true, compute and report the sum of all non-idle CPU states + report_active = false + ## If true and the info is available then add core_id and physical_id tags + core_tags = false + + +# Read metrics about disk usage by mount point +[[inputs.disk]] + ## By default stats will be gathered for all mount points. + ## Set mount_points will restrict the stats to only the specified mount points. + # mount_points = ["/"] + + ## Ignore mount points by filesystem type. + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] + + ## Ignore mount points by mount options. + ## The 'mount' command reports options of all mounts in parathesis. + ## Bind mounts can be ignored with the special 'bind' option. 
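+  ## For example, ignore_mount_opts = ["bind"] would skip bind mounts (illustrative).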
+ # ignore_mount_opts = [] + + +# Read metrics about disk IO by device +[[inputs.diskio]] + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. + ## NOTE: Globbing expressions (e.g. asterix) are not supported for + ## disk synonyms like '/dev/disk/by-id'. + # devices = ["sda", "sdb", "vd*", "/dev/disk/by-id/nvme-eui.00123deadc0de123"] + ## Uncomment the following line if you need disk serial numbers. + # skip_serial_number = false + # + ## On systems which support it, device metadata can be added in the form of + ## tags. + ## Currently only Linux is supported via udev properties. You can view + ## available properties for a device by running: + ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. + # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] + # + ## Using the same metadata source as device_tags, you can also customize the + ## name of the device via templates. + ## The 'name_templates' parameter is a list of templates to try and apply to + ## the device. The template may contain variables in the form of '$PROPERTY' or + ## '${PROPERTY}'. The first template which does not contain any variables not + ## present for the device is used as the device name tag. + ## The typical use case is for LVM volumes, to get the VG/LV name instead of + ## the near-meaningless DM-0 name. + # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] + + +# Plugin to collect various Linux kernel statistics. +# This plugin ONLY supports Linux +[[inputs.kernel]] + ## Additional gather options + ## Possible options include: + ## * ksm - kernel same-page merging + # collect = [] + + +# Read metrics about memory usage +[[inputs.mem]] + # no configuration + + +# Get the number of processes and group them by status +# This plugin ONLY supports non-Windows +[[inputs.processes]] + ## Use sudo to run ps command on *BSD systems. Linux systems will read + ## /proc, so this does not apply there. + # use_sudo = false + + +# Read metrics about swap memory usage +[[inputs.swap]] + # no configuration + + +# Read metrics about system load & uptime +[[inputs.system]] + # no configuration + + +# # Gather ActiveMQ metrics +# [[inputs.activemq]] +# ## ActiveMQ WebConsole URL +# url = "http://127.0.0.1:8161" +# +# ## Required ActiveMQ Endpoint +# ## deprecated in 1.11; use the url option +# # server = "192.168.50.10" +# # port = 8161 +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Required ActiveMQ webadmin root path +# # webadmin = "admin" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read stats from aerospike server(s) +# [[inputs.aerospike]] +# ## Aerospike servers to connect to (with port) +# ## This plugin will query all namespaces the aerospike +# ## server has configured and get stats for them. 
+# servers = ["localhost:3000"] +# +# # username = "telegraf" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# # tls_name = "tlsname" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# # Feature Options +# # Add namespace variable to limit the namespaces executed on +# # Leave blank to do all +# # disable_query_namespaces = true # default false +# # namespaces = ["namespace1", "namespace2"] +# +# # Enable set level telemetry +# # query_sets = true # default: false +# # Add namespace set combinations to limit sets executed on +# # Leave blank to do all sets +# # sets = ["namespace1/set1", "namespace1/set2", "namespace3"] +# +# # Histograms +# # enable_ttl_histogram = true # default: false +# # enable_object_size_linear_histogram = true # default: false +# +# # by default, aerospike produces a 100 bucket histogram +# # this is not great for most graphing tools, this will allow +# # the ability to squash this to a smaller number of buckets +# # To have a balanced histogram, the number of buckets chosen +# # should divide evenly into 100. +# # num_histogram_buckets = 100 # default: 10 + + +# # Query statistics from AMD Graphics cards using rocm-smi binary +# [[inputs.amd_rocm_smi]] +# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/opt/rocm/bin/rocm-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + +# # Read Apache status information (mod_status) +# [[inputs.apache]] +# ## An array of URLs to gather from, must be directed at the machine +# ## readable version of the mod_status page including the auto query string. +# ## Default is "http://localhost/server-status?auto". +# urls = ["http://localhost/server-status?auto"] +# +# ## Credentials for basic HTTP authentication. +# # username = "myuser" +# # password = "mypassword" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Monitor APC UPSes connected to apcupsd +# [[inputs.apcupsd]] +# # A list of running apcupsd server to connect to. +# # If not provided will default to tcp://127.0.0.1:3551 +# servers = ["tcp://127.0.0.1:3551"] +# +# ## Timeout for dialing server. +# timeout = "5s" + + +# # Gather metrics from Apache Aurora schedulers +# [[inputs.aurora]] +# ## Schedulers are the base addresses of your Aurora Schedulers +# schedulers = ["http://127.0.0.1:8081"] +# +# ## Set of role types to collect metrics from. +# ## +# ## The scheduler roles are checked each interval by contacting the +# ## scheduler nodes; zookeeper is not contacted. +# # roles = ["leader", "follower"] +# +# ## Timeout is the max time for total network operations. +# # timeout = "5s" +# +# ## Username and password are sent using HTTP Basic Auth. 
+# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather Azure resources metrics from Azure Monitor API +# [[inputs.azure_monitor]] +# # can be found under Overview->Essentials in the Azure portal for your application/service +# subscription_id = "<>" +# # can be obtained by registering an application under Azure Active Directory +# client_id = "<>" +# # can be obtained by registering an application under Azure Active Directory +# client_secret = "<>" +# # can be found under Azure Active Directory->Properties +# tenant_id = "<>" +# +# # resource target #1 to collect metrics from +# [[inputs.azure_monitor.resource_target]] +# # can be found undet Overview->Essentials->JSON View in the Azure portal for your application/service +# # must start with 'resourceGroups/...' ('/subscriptions/xxxxxxxx-xxxx-xxxx-xxx-xxxxxxxxxxxx' +# # must be removed from the beginning of Resource ID property value) +# resource_id = "<>" +# # the metric names to collect +# # leave the array empty to use all metrics available to this resource +# metrics = [ "<>", "<>" ] +# # metrics aggregation type value to collect +# # can be 'Total', 'Count', 'Average', 'Minimum', 'Maximum' +# # leave the array empty to collect all aggregation types values for each metric +# aggregations = [ "<>", "<>" ] +# +# # resource target #2 to collect metrics from +# [[inputs.azure_monitor.resource_target]] +# resource_id = "<>" +# metrics = [ "<>", "<>" ] +# aggregations = [ "<>", "<>" ] +# +# # resource group target #1 to collect metrics from resources under it with resource type +# [[inputs.azure_monitor.resource_group_target]] +# # the resource group name +# resource_group = "<>" +# +# # defines the resources to collect metrics from +# [[inputs.azure_monitor.resource_group_target.resource]] +# # the resource type +# resource_type = "<>" +# metrics = [ "<>", "<>" ] +# aggregations = [ "<>", "<>" ] +# +# # defines the resources to collect metrics from +# [[inputs.azure_monitor.resource_group_target.resource]] +# resource_type = "<>" +# metrics = [ "<>", "<>" ] +# aggregations = [ "<>", "<>" ] +# +# # resource group target #2 to collect metrics from resources under it with resource type +# [[inputs.azure_monitor.resource_group_target]] +# resource_group = "<>" +# +# [[inputs.azure_monitor.resource_group_target.resource]] +# resource_type = "<>" +# metrics = [ "<>", "<>" ] +# aggregations = [ "<>", "<>" ] +# +# # subscription target #1 to collect metrics from resources under it with resource type +# [[inputs.azure_monitor.subscription_target]] +# resource_type = "<>" +# metrics = [ "<>", "<>" ] +# aggregations = [ "<>", "<>" ] +# +# # subscription target #2 to collect metrics from resources under it with resource type +# [[inputs.azure_monitor.subscription_target]] +# resource_type = "<>" +# metrics = [ "<>", "<>" ] +# aggregations = [ "<>", "<>" ] + + +# # Gather Azure Storage Queue metrics +# [[inputs.azure_storage_queue]] +# ## Required Azure Storage Account name +# account_name = "mystorageaccount" +# +# ## Required Azure Storage Account access key +# account_key = "storageaccountaccesskey" +# +# ## Set to false to disable peeking age of oldest message (executes faster) +# # peek_oldest_message_age = true + + +# # Read metrics of bcache from stats_total and dirty_data +# # This plugin ONLY supports 
Linux +# [[inputs.bcache]] +# ## Bcache sets path +# ## If not specified, then default is: +# bcachePath = "/sys/fs/bcache" +# +# ## By default, Telegraf gather stats for all bcache devices +# ## Setting devices will restrict the stats to the specified +# ## bcache devices. +# bcacheDevs = ["bcache0"] + + +# # Collects Beanstalkd server and tubes stats +# [[inputs.beanstalkd]] +# ## Server to collect data from +# server = "localhost:11300" +# +# ## List of tubes to gather stats about. +# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command +# tubes = ["notifications"] + + +# # Read metrics exposed by Beat +# [[inputs.beat]] +# ## An URL from which to read Beat-formatted JSON +# ## Default is "http://127.0.0.1:5066". +# url = "http://127.0.0.1:5066" +# +# ## Enable collection of the listed stats +# ## An empty list means collect all. Available options are currently +# ## "beat", "libbeat", "system" and "filebeat". +# # include = ["beat", "libbeat", "filebeat"] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "logstash.example.com" +# +# ## Timeout for HTTP requests +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read BIND nameserver XML statistics +# [[inputs.bind]] +# ## An array of BIND XML statistics URI to gather stats. +# ## Default is "http://localhost:8053/xml/v3". +# # urls = ["http://localhost:8053/xml/v3"] +# # gather_memory_contexts = false +# # gather_views = false +# +# ## Timeout for http requests made by bind nameserver +# # timeout = "4s" + + +# # Collect bond interface status, slaves statuses and failures count +# [[inputs.bond]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" +# +# ## Sets 'sys' directory path +# ## If not specified, then default is /sys +# # host_sys = "/sys" +# +# ## By default, telegraf gather stats for all bond interfaces +# ## Setting interfaces will restrict the stats to the specified +# ## bond interfaces. +# # bond_interfaces = ["bond0"] +# +# ## Tries to collect additional bond details from /sys/class/net/{bond} +# ## currently only useful for LACP (mode 4) bonds +# # collect_sys_details = false + + +# # Collect Kafka topics and consumers status from Burrow HTTP API. +# [[inputs.burrow]] +# ## Burrow API endpoints in format "schema://host:port". +# ## Default is "http://localhost:8000". +# servers = ["http://localhost:8000"] +# +# ## Override Burrow API prefix. +# ## Useful when Burrow is behind reverse-proxy. +# # api_prefix = "/v3/kafka" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Limit per-server concurrent connections. +# ## Useful in case of large number of topics or consumer groups. +# # concurrent_connections = 20 +# +# ## Filter clusters, default is no filtering. +# ## Values can be specified as glob patterns. +# # clusters_include = [] +# # clusters_exclude = [] +# +# ## Filter consumer groups, default is no filtering. +# ## Values can be specified as glob patterns. +# # groups_include = [] +# # groups_exclude = [] +# +# ## Filter topics, default is no filtering. 
+# ## Values can be specified as glob patterns. +# # topics_include = [] +# # topics_exclude = [] +# +# ## Credentials for basic HTTP authentication. +# # username = "" +# # password = "" +# +# ## Optional SSL config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# # insecure_skip_verify = false + + +# # Collects performance metrics from the MON, OSD, MDS and RGW nodes +# # in a Ceph storage cluster. +# [[inputs.ceph]] +# ## This is the recommended interval to poll. Too frequent and you +# ## will lose data points due to timeouts during rebalancing and recovery +# interval = '1m' +# +# ## All configuration values are optional, defaults are shown below +# +# ## location of ceph binary +# ceph_binary = "/usr/bin/ceph" +# +# ## directory in which to look for socket files +# socket_dir = "/var/run/ceph" +# +# ## prefix of MON and OSD socket files, used to determine socket type +# mon_prefix = "ceph-mon" +# osd_prefix = "ceph-osd" +# mds_prefix = "ceph-mds" +# rgw_prefix = "ceph-client" +# +# ## suffix used to identify socket files +# socket_suffix = "asok" +# +# ## Ceph user to authenticate as, ceph will search for the corresponding +# ## keyring e.g. client.admin.keyring in /etc/ceph, or the explicit path +# ## defined in the client section of ceph.conf for example: +# ## +# ## [client.telegraf] +# ## keyring = /etc/ceph/client.telegraf.keyring +# ## +# ## Consult the ceph documentation for more detail on keyring generation. +# ceph_user = "client.admin" +# +# ## Ceph configuration to use to locate the cluster +# ceph_config = "/etc/ceph/ceph.conf" +# +# ## Whether to gather statistics via the admin socket +# gather_admin_socket_stats = true +# +# ## Whether to gather statistics via ceph commands, requires ceph_user +# ## and ceph_config to be specified +# gather_cluster_stats = false + + +# # Read specific statistics per cgroup +# # This plugin ONLY supports Linux +# [[inputs.cgroup]] +# ## Directories in which to look for files, globs are supported. +# ## Consider restricting paths to the set of cgroups you really +# ## want to monitor if you have a large number of cgroups, to avoid +# ## any cardinality issues. +# # paths = [ +# # "/sys/fs/cgroup/memory", +# # "/sys/fs/cgroup/memory/child1", +# # "/sys/fs/cgroup/memory/child2/*", +# # ] +# ## cgroup stat fields, as file names, globs are supported. +# ## these file names are appended to each path from above. +# # files = ["memory.*usage*", "memory.limit_in_bytes"] + + +# # Get standard chrony metrics, requires chronyc executable. +# [[inputs.chrony]] +# ## If true, chronyc tries to perform a DNS lookup for the time server. 
+# # dns_lookup = false + + +# # Pull Metric Statistics from Amazon CloudWatch +# [[inputs.cloudwatch]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and +# ## web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## If you are using CloudWatch cross-account observability, you can +# ## set IncludeLinkedAccounts to true in a monitoring account +# ## and collect metrics from the linked source accounts +# # include_linked_accounts = false +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Set http_proxy +# # use_system_proxy = false +# # http_proxy_url = "http://localhost:8888" +# +# ## The minimum period for Cloudwatch metrics is 1 minute (60s). However not +# ## all metrics are made available to the 1 minute period. Some are collected +# ## at 3 minute, 5 minute, or larger intervals. +# ## See https://aws.amazon.com/cloudwatch/faqs/#monitoring. +# ## Note that if a period is configured that is smaller than the minimum for a +# ## particular metric, that metric will not be returned by the Cloudwatch API +# ## and will not be collected by Telegraf. +# # +# ## Requested CloudWatch aggregation Period (required) +# ## Must be a multiple of 60s. +# period = "5m" +# +# ## Collection Delay (required) +# ## Must account for metrics availability via CloudWatch API +# delay = "5m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Recommended if "delay" and "period" are both within 3 hours of request +# ## time. Invalid values will be ignored. Recently Active feature will only +# ## poll for CloudWatch ListMetrics values that occurred within the last 3h. +# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics +# ## API and require less memory to retain. +# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will +# ## not return data more than 3 hours old. +# ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html +# #recently_active = "PT3H" +# +# ## Configure the TTL for the internal cache of metrics. +# # cache_ttl = "1h" +# +# ## Metric Statistic Namespaces (required) +# namespaces = ["AWS/ELB"] +# +# ## Maximum requests per second. Note that the global default AWS rate limit +# ## is 50 reqs/sec, so if you define multiple namespaces, these should add up +# ## to a maximum of 50. +# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html +# # ratelimit = 25 +# +# ## Timeout for http requests made by the cloudwatch client. +# # timeout = "5s" +# +# ## Batch Size +# ## The size of each batch to send requests to Cloudwatch. 500 is the +# ## suggested largest size. 
If a request gets too large (413 errors), consider
+# ## reducing this amount.
+# # batch_size = 500
+#
+# ## Namespace-wide statistic filters. These allow fewer queries to be made to
+# ## cloudwatch.
+# # statistic_include = ["average", "sum", "minimum", "maximum", "sample_count"]
+# # statistic_exclude = []
+#
+# ## Metrics to Pull
+# ## Defaults to all Metrics in Namespace if nothing is provided
+# ## Refreshes Namespace available metrics every 1h
+# #[[inputs.cloudwatch.metrics]]
+# # names = ["Latency", "RequestCount"]
+# #
+# # ## Statistic filters for Metric. These allow for retrieving specific
+# # ## statistics for an individual metric.
+# # # statistic_include = ["average", "sum", "minimum", "maximum", "sample_count"]
+# # # statistic_exclude = []
+# #
+# # ## Dimension filters for Metric.
+# # ## All dimensions defined for the metric names must be specified in order
+# # ## to retrieve the metric statistics.
+# # ## 'value' has wildcard / 'glob' matching support such as 'p-*'.
+# # [[inputs.cloudwatch.metrics.dimensions]]
+# # name = "LoadBalancerName"
+# # value = "p-example"
+
+
+# # Collects conntrack stats from the configured directories and files.
+# # This plugin ONLY supports Linux
+# [[inputs.conntrack]]
+# ## The following defaults would work with multiple versions of conntrack.
+# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
+# ## kernel versions, as are the directory locations.
+#
+# ## Look through /proc/net/stat/nf_conntrack for these metrics
+# ## all - aggregated statistics
+# ## percpu - include detailed statistics with cpu tag
+# collect = ["all", "percpu"]
+#
+# ## User-specified directories and files to look through
+# ## Directories to search within for the conntrack files above.
+# ## Missing directories will be ignored.
+# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
+#
+# ## Superset of filenames to look for within the conntrack dirs.
+# ## Missing files will be ignored.
+# files = ["ip_conntrack_count","ip_conntrack_max",
+# "nf_conntrack_count","nf_conntrack_max"]
+
+
+# # Gather health check statuses from services registered in Consul
+# [[inputs.consul]]
+# ## Consul server address
+# # address = "localhost:8500"
+#
+# ## URI scheme for the Consul server, one of "http", "https"
+# # scheme = "http"
+#
+# ## Metric version controls the mapping from Consul metrics into
+# ## Telegraf metrics. Version 2 moved all fields with string values
+# ## to tags.
+# ##
+# ## example: metric_version = 1; deprecated in 1.16
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
+# ## ACL token used in every request
+# # token = ""
+#
+# ## HTTP Basic Authentication username and password.
+# # username = ""
+# # password = ""
+#
+# ## Data center to query the health checks from
+# # datacenter = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+#
+# ## Consul checks' tag splitting
+# # When tags are formatted like "key:value" with ":" as a delimiter then
+# # they will be split and reported as proper key:value in Telegraf
+# # tag_delimiter = ":"
+
+
+# # Read metrics from the Consul Agent API
+# [[inputs.consul_agent]]
+# ## URL for the Consul agent
+# # url = "http://127.0.0.1:8500"
+#
+# ## Use auth token for authorization.
+# ## If both are set, an error is thrown.
+# ## If both are empty, no token will be used.
+# # token_file = "/path/to/auth/token" +# ## OR +# # token = "a1234567-40c7-9048-7bae-378687048181" +# +# ## Set timeout (default 5 seconds) +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + +# # Read per-node and per-bucket metrics from Couchbase +# [[inputs.couchbase]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## http://couchbase-0.example.com/ +# ## http://admin:secret@couchbase-0.example.com:8091/ +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no protocol is specified, HTTP is used. +# ## If no port is specified, 8091 is used. +# servers = ["http://localhost:8091"] +# +# ## Filter bucket fields to include only here. +# # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification (defaults to false) +# ## If set to false, tls_cert and tls_key are required +# # insecure_skip_verify = false +# +# ## Whether to collect cluster-wide bucket statistics +# ## It is recommended to disable this in favor of node_stats +# ## to get a better view of the cluster. +# # cluster_bucket_stats = true +# +# ## Whether to collect bucket stats for each individual node +# # node_bucket_stats = false +# +# ## List of additional stats to collect, choose from: +# ## * autofailover +# # additional_stats = [] + + +# # Read CouchDB Stats from one or more servers +# [[inputs.couchdb]] +# ## Works with CouchDB stats endpoints out of the box +# ## Multiple Hosts from which to read CouchDB stats: +# hosts = ["http://localhost:8086/_stats"] +# +# ## Use HTTP Basic Authentication. +# # basic_username = "telegraf" +# # basic_password = "p@ssw0rd" + + +# # Fetch metrics from a CSGO SRCDS +# [[inputs.csgo]] +# ## Specify servers using the following format: +# ## servers = [ +# ## ["ip1:port1", "rcon_password1"], +# ## ["ip2:port2", "rcon_password2"], +# ## ] +# # +# ## If no servers are specified, no data will be collected +# servers = [] + + +# # Input plugin for DC/OS metrics +# [[inputs.dcos]] +# ## The DC/OS cluster URL. +# cluster_url = "https://dcos-master-1" +# +# ## The ID of the service account. +# service_account_id = "telegraf" +# ## The private key file for the service account. +# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem" +# +# ## Path containing login token. If set, will read on every gather. +# # token_file = "/home/dcos/.dcos/token" +# +# ## In all filter options if both include and exclude are empty all items +# ## will be collected. Arrays may contain glob patterns. +# ## +# ## Node IDs to collect metrics from. If a node is excluded, no metrics will +# ## be collected for its containers or apps. +# # node_include = [] +# # node_exclude = [] +# ## Container IDs to collect container metrics from. +# # container_include = [] +# # container_exclude = [] +# ## Container IDs to collect app metrics from. +# # app_include = [] +# # app_exclude = [] +# +# ## Maximum concurrent connections to the cluster. +# # max_connections = 10 +# ## Maximum time to receive a response from cluster. 
+# # response_timeout = "20s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# ## Recommended filtering to reduce series cardinality. +# # [inputs.dcos.tagdrop] +# # path = ["/var/lib/mesos/slave/slaves/*"] + + +# # Read metrics from one or many disque servers +# [[inputs.disque]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port and password. +# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost"] + + +# # Provide a native collection for dmsetup based statistics for dm-cache +# # This plugin ONLY supports Linux +# [[inputs.dmcache]] +# ## Whether to report per-device stats or not +# per_device = true + + +# # Query given DNS server and gives statistics +# [[inputs.dns_query]] +# ## servers to query +# servers = ["8.8.8.8"] +# +# ## Network is the network protocol name. +# # network = "udp" +# +# ## Domains or subdomains to query. +# # domains = ["."] +# +# ## Query record type. +# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. +# # record_type = "A" +# +# ## Dns server port. +# # port = 53 +# +# ## Query timeout +# # timeout = "2s" +# +# ## Include the specified additional properties in the resulting metric. +# ## The following values are supported: +# ## "first_ip" -- return IP of the first A and AAAA answer +# ## "all_ips" -- return IPs of all A and AAAA answers +# # include_fields = [] + + +# # Read metrics about docker containers +# [[inputs.docker]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/docker.sock" +# +# ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# ## Note: configure this in one of the manager nodes in a Swarm cluster. +# ## configuring in multiple Swarm managers results in duplication of metrics. +# gather_services = false +# +# ## Only collect metrics for these containers. Values will be appended to +# ## container_name_include. +# ## Deprecated (1.4.0), use container_name_include +# container_names = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Containers to include and exclude. Collect all if empty. Globs accepted. +# container_name_include = [] +# container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## Timeout for docker list, info, and stats commands +# timeout = "5s" +# +# ## Whether to report for each container per-device blkio (8:0, 8:1...), +# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. 
+# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting +# ## is honored. +# perdevice = true +# +# ## Specifies for which classes a per-device metric should be issued +# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) +# ## Please note that this setting has no effect if 'perdevice' is set to 'true' +# # perdevice_include = ["cpu"] +# +# ## Whether to report for each container total blkio and network stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. +# ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting +# ## is honored. +# total = false +# +# ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. +# ## Possible values are 'cpu', 'blkio' and 'network' +# ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. +# ## Please note that this setting has no effect if 'total' is set to 'false' +# # total_include = ["cpu", "blkio", "network"] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# docker_label_include = [] +# docker_label_exclude = [] +# +# ## Which environment variables should we use as a tag +# tag_env = ["JAVA_HOME", "HEAP_SIZE"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics about dovecot servers +# [[inputs.dovecot]] +# ## specify dovecot servers via an address:port list +# ## e.g. +# ## localhost:24242 +# ## or as an UDS socket +# ## e.g. +# ## /var/run/dovecot/old-stats +# ## +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost:24242"] +# +# ## Type is one of "user", "domain", "ip", or "global" +# type = "global" +# +# ## Wildcard matches like "*.com". An empty string "" is same as "*" +# ## If type = "ip" filters should be +# filters = [""] + + +# # Reads metrics from DPDK applications using v2 telemetry interface. +# # This plugin ONLY supports Linux +# [[inputs.dpdk]] +# ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK +# ## telemetry interface. +# # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" +# +# ## Duration that defines how long the connected socket client will wait for +# ## a response before terminating connection. +# ## This includes both writing to and reading from socket. Since it's local +# ## socket access to a fast packet processing application, the timeout should +# ## be sufficient for most users. +# ## Setting the value to 0 disables the timeout (not recommended) +# # socket_access_timeout = "200ms" +# +# ## Enables telemetry data collection for selected device types. +# ## Adding "ethdev" enables collection of telemetry from DPDK NICs +# ## (stats, xstats, link_status). +# ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices +# ## (xstats). +# # device_types = ["ethdev"] +# +# ## List of custom, application-specific telemetry commands to query +# ## The list of available commands depend on the application deployed. 
+# ## Applications can register their own commands via telemetry library API +# ## http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands +# ## For L3 Forwarding with Power Management Sample Application this could be: +# ## additional_commands = ["/l3fwd-power/stats"] +# # additional_commands = [] +# +# ## Allows turning off collecting data for individual "ethdev" commands. +# ## Remove "/ethdev/link_status" from list to gather link status metrics. +# [inputs.dpdk.ethdev] +# exclude_commands = ["/ethdev/link_status"] +# +# ## When running multiple instances of the plugin it's recommended to add a +# ## unique tag to each instance to identify metrics exposed by an instance +# ## of DPDK application. This is useful when multiple DPDK apps run on a +# ## single host. +# ## [inputs.dpdk.tags] +# ## dpdk_instance = "my-fwd-app" + + +# # Read metrics about ECS containers +# [[inputs.ecs]] +# ## ECS metadata url. +# ## Metadata v2 API is used if set explicitly. Otherwise, +# ## v3 metadata endpoint API is used if available. +# # endpoint_url = "" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "RUNNING" state will be captured. +# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", +# ## "RESOURCES_PROVISIONED", "STOPPED". +# # container_status_include = [] +# # container_status_exclude = [] +# +# ## ecs labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# ecs_label_include = [ "com.amazonaws.ecs.*" ] +# ecs_label_exclude = [] +# +# ## Timeout for queries. +# # timeout = "5s" + + +# # Read stats from one or more Elasticsearch servers or clusters +# [[inputs.elasticsearch]] +# ## specify a list of one or more Elasticsearch servers +# ## you can add username and password to your url to use basic authentication: +# ## servers = ["http://user:pass@localhost:9200"] +# servers = ["http://localhost:9200"] +# +# ## Timeout for HTTP requests to the elastic search server(s) +# http_timeout = "5s" +# +# ## When local is true (the default), the node will read only its own stats. +# ## Set local to false when you want to read the node stats from all nodes +# ## of the cluster. +# local = true +# +# ## Set cluster_health to true when you want to obtain cluster health stats +# cluster_health = false +# +# ## Adjust cluster_health_level when you want to obtain detailed health stats +# ## The options are +# ## - indices (default) +# ## - cluster +# # cluster_health_level = "indices" +# +# ## Set cluster_stats to true when you want to obtain cluster stats. +# cluster_stats = false +# +# ## Only gather cluster_stats from the master node. +# ## To work this require local = true +# cluster_stats_only_from_master = true +# +# ## Indices to collect; can be one or more indices names or _all +# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index +# ## names that end with a changing value, like a date. +# indices_include = ["_all"] +# +# ## One of "shards", "cluster", "indices" +# ## Currently only "shards" is implemented +# indices_level = "shards" +# +# ## node_stats is a list of sub-stats that you want to have gathered. +# ## Valid options are "indices", "os", "process", "jvm", "thread_pool", +# ## "fs", "transport", "http", "breaker". 
By default, all stats are gathered.
+# # node_stats = ["jvm", "http"]
+#
+# ## HTTP Basic Authentication username and password.
+# # username = ""
+# # password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Sets the number of most recent indices to return for indices that are
+# ## configured with a date-stamped suffix. Each 'indices_include' entry
+# ## ending with a wildcard (*) or glob matching pattern will group together
+# ## all indices that match it, and sort them by the date or number after
+# ## the wildcard. Metrics are then gathered for only the
+# ## 'num_most_recent_indices' number of most recent indices.
+# # num_most_recent_indices = 0
+
+
+# # Derive metrics from aggregating Elasticsearch query results
+# [[inputs.elasticsearch_query]]
+# ## The full HTTP endpoint URL for your Elasticsearch instance
+# ## Multiple urls can be specified as part of the same cluster,
+# ## this means that only ONE of the urls will be written to each interval.
+# urls = [ "http://node1.es.example.com:9200" ] # required.
+#
+# ## Elasticsearch client timeout, defaults to "5s".
+# # timeout = "5s"
+#
+# ## Set to true to ask Elasticsearch for a list of all cluster nodes,
+# ## thus it is not necessary to list all nodes in the urls config option
+# # enable_sniffer = false
+#
+# ## Set the interval to check if the Elasticsearch nodes are available
+# ## This option is only used if enable_sniffer is also set (0s to disable it)
+# # health_check_interval = "10s"
+#
+# ## HTTP basic authentication details (eg. when using x-pack)
+# # username = "telegraf"
+# # password = "mypassword"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# [[inputs.elasticsearch_query.aggregation]]
+# ## measurement name for the results of the aggregation query
+# measurement_name = "measurement"
+#
+# ## Elasticsearch indexes to query (accepts wildcards).
+# index = "index-*"
+#
+# ## The date/time field in the Elasticsearch index (mandatory).
+# date_field = "@timestamp"
+#
+# ## If the field used for the date/time field in Elasticsearch is also using
+# ## a custom date/time format it may be required to provide the format to
+# ## correctly parse the field.
+# ##
+# ## If using one of the built in elasticsearch formats this is not required.
+# # date_field_custom_format = ""
+#
+# ## Time window to query (eg. "1m" to query documents from last minute).
+# ## Normally this should be set to the same value as the collection interval
+# query_period = "1m"
+#
+# ## Lucene query to filter results
+# # filter_query = "*"
+#
+# ## Fields to aggregate values (must be numeric fields)
+# # metric_fields = ["metric"]
+#
+# ## Aggregation function to use on the metric fields
+# ## Must be set if 'metric_fields' is set
+# ## Valid values are: avg, sum, min, max
+# # metric_function = "avg"
+#
+# ## Fields to be used as tags
+# ## Must be text, non-analyzed fields.
Metric aggregations are performed +# ## per tag +# # tags = ["field.keyword", "field2.keyword"] +# +# ## Set to true to not ignore documents when the tag(s) above are missing +# # include_missing_tag = false +# +# ## String value of the tag when the tag does not exist +# ## Used when include_missing_tag is true +# # missing_tag_value = "null" + + +# # Returns ethtool statistics for given interfaces +# # This plugin ONLY supports Linux +# [[inputs.ethtool]] +# ## List of interfaces to pull metrics for +# # interface_include = ["eth0"] +# +# ## List of interfaces to ignore when pulling metrics. +# # interface_exclude = ["eth1"] +# +# ## Plugin behavior for downed interfaces +# ## Available choices: +# ## - expose: collect & report metrics for down interfaces +# ## - skip: ignore interfaces that are marked down +# # down_interfaces = "expose" +# +# ## Reading statistics from interfaces in additional namespaces is also +# ## supported, so long as the namespaces are named (have a symlink in +# ## /var/run/netns). The telegraf process will also need the CAP_SYS_ADMIN +# ## permission. +# ## By default, only the current namespace will be used. For additional +# ## namespace support, at least one of `namespace_include` and +# ## `namespace_exclude` must be provided. +# ## To include all namespaces, set `namespace_include` to `["*"]`. +# ## The initial namespace (if anonymous) can be specified with the empty +# ## string (""). +# +# ## List of namespaces to pull metrics for +# # namespace_include = [] +# +# ## List of namespace to ignore when pulling metrics. +# # namespace_exclude = [] +# +# ## Some drivers declare statistics with extra whitespace, different spacing, +# ## and mix cases. This list, when enabled, can be used to clean the keys. +# ## Here are the current possible normalizations: +# ## * snakecase: converts fooBarBaz to foo_bar_baz +# ## * trim: removes leading and trailing whitespace +# ## * lower: changes all capitalized letters to lowercase +# ## * underscore: replaces spaces with underscores +# # normalize_keys = ["snakecase", "trim", "lower", "underscore"] + + +# # Read metrics from one or more commands that can output to stdout +# [[inputs.exec]] +# ## Commands array +# commands = [ +# "/tmp/test.sh", +# "/usr/bin/mycollector --foo=bar", +# "/tmp/collect_*.sh" +# ] +# +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# +# ## Timeout for each command to complete. +# timeout = "5s" +# +# ## measurement name suffix (for separating different commands) +# name_suffix = "_mycollector" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from fail2ban. +# [[inputs.fail2ban]] +# ## Use sudo to run fail2ban-client +# # use_sudo = false +# +# ## Use the given socket instead of the default one +# # socket = "/var/run/fail2ban/fail2ban.sock" + + +# # Read devices value(s) from a Fibaro controller +# [[inputs.fibaro]] +# ## Required Fibaro controller address/hostname. 
+# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
+# url = "http://:80"
+#
+# ## Required credentials to access the API (http://)
+# username = ""
+# password = ""
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Fibaro Device Type
+# ## By default, this plugin will attempt to read using the HC2 API. For HC3
+# ## devices, set this to "HC3"
+# # device_type = "HC2"
+
+
+# # Parse a complete file each interval
+# [[inputs.file]]
+# ## Files to parse each interval. Accept standard unix glob matching rules,
+# ## as well as ** to match recursive files and directories.
+# files = ["/tmp/metrics.out"]
+#
+# ## Character encoding to use when interpreting the file contents. Invalid
+# ## characters are replaced using the unicode replacement character. When set
+# ## to the empty string the data is not decoded to text.
+# ## ex: character_encoding = "utf-8"
+# ## character_encoding = "utf-16le"
+# ## character_encoding = "utf-16be"
+# ## character_encoding = ""
+# # character_encoding = ""
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+#
+# ## Name a tag containing the name of the file the data was parsed from. Leave empty
+# ## to disable. Be cautious when file name variation is high, as this can increase the
+# ## cardinality significantly. Read more about cardinality here:
+# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality
+# # file_tag = ""
+
+
+# # Count files in a directory
+# [[inputs.filecount]]
+# ## Directories to gather stats about.
+# ## This accepts standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## /var/log/** -> recursively find all directories in /var/log and count files in each directory
+# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory
+# ## /var/log -> count all files in /var/log and all of its subdirectories
+# directories = ["/var/cache/apt", "/tmp"]
+#
+# ## Only count files that match the name pattern. Defaults to "*".
+# name = "*"
+#
+# ## Count files in subdirectories. Defaults to true.
+# recursive = true
+#
+# ## Only count regular files. Defaults to true.
+# regular_only = true
+#
+# ## Follow all symlinks while walking the directory tree. Defaults to false.
+# follow_symlinks = false
+#
+# ## Only count files that are at least this size. If size is
+# ## a negative number, only count files that are smaller than the
+# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
+# ## Without quotes and units, interpreted as size in bytes.
+# size = "0B"
+#
+# ## Only count files that have not been touched for at least this
+# ## duration. If mtime is negative, only count files that have been
+# ## touched in this duration. Defaults to "0s".
+# mtime = "0s"
+
+
+# # Read stats about given file(s)
+# [[inputs.filestat]]
+# ## Files to gather stats about.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". See https://github.com/gobwas/glob.
+# files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"]
+#
+# ## If true, read the entire file and calculate an md5 checksum.
+# md5 = false + + +# # Read real time temps from fireboard.io servers +# [[inputs.fireboard]] +# ## Specify auth token for your account +# auth_token = "invalidAuthToken" +# ## You can override the fireboard server URL if necessary +# # url = https://fireboard.io/api/v1/devices.json +# ## You can set a different http_timeout if you need to +# ## You should set a string using an number and time indicator +# ## for example "12s" for 12 seconds. +# # http_timeout = "4s" + + +# # Read metrics exposed by fluentd in_monitor plugin +# [[inputs.fluentd]] +# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint). +# ## +# ## Endpoint: +# ## - only one URI is allowed +# ## - https is not supported +# endpoint = "http://localhost:24220/api/plugins.json" +# +# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) +# exclude = [ +# "monitor_agent", +# "dummy", +# ] + + +# # Gather repository information from GitHub hosted repositories. +# [[inputs.github]] +# ## List of repositories to monitor +# repositories = [ +# "influxdata/telegraf", +# "influxdata/influxdb" +# ] +# +# ## Github API access token. Unauthenticated requests are limited to 60 per hour. +# # access_token = "" +# +# ## Github API enterprise url. Github Enterprise accounts must specify their base url. +# # enterprise_base_url = "" +# +# ## Timeout for HTTP requests. +# # http_timeout = "5s" +# +# ## List of additional fields to query. +# ## NOTE: Getting those fields might involve issuing additional API-calls, so please +# ## make sure you do not exceed the rate-limit of GitHub. +# ## +# ## Available fields are: +# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) +# # additional_fields = [] + + +# # Gather metrics by iterating the files located on a Cloud Storage Bucket. +# [[inputs.google_cloud_storage]] +# ## Required. Name of Cloud Storage bucket to ingest metrics from. +# bucket = "my-bucket" +# +# ## Optional. Prefix of Cloud Storage bucket keys to list metrics from. +# # key_prefix = "my-bucket" +# +# ## Key that will store the offsets in order to pick up where the ingestion was left. +# offset_key = "offset_key" +# +# ## Key that will store the offsets in order to pick up where the ingestion was left. +# objects_per_iteration = 10 +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## Google Cloud Storage APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" + + +# # Read flattened metrics from one or more GrayLog HTTP endpoints +# [[inputs.graylog]] +# ## API endpoint, currently supported API: +# ## +# ## - multiple (e.g. http://:9000/api/system/metrics/multiple) +# ## - namespace (e.g. http://:9000/api/system/metrics/namespace/{namespace}) +# ## +# ## For namespace endpoint, the metrics array will be ignored for that call. +# ## Endpoint can contain namespace and multiple type calls. 
+# ## +# ## Please check http://[graylog-server-ip]:9000/api/api-browser for full list +# ## of endpoints +# servers = [ +# "http://[graylog-server-ip]:9000/api/system/metrics/multiple", +# ] +# +# ## Set timeout (default 5 seconds) +# # timeout = "5s" +# +# ## Metrics list +# ## List of metrics can be found on Graylog webservice documentation. +# ## Or by hitting the web service api at: +# ## http://[graylog-host]:9000/api/system/metrics +# metrics = [ +# "jvm.cl.loaded", +# "jvm.memory.pools.Metaspace.committed" +# ] +# +# ## Username and password +# username = "" +# password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics of HAProxy, via stats socket or http endpoints +# [[inputs.haproxy]] +# ## List of stats endpoints. Metrics can be collected from both http and socket +# ## endpoints. Examples of valid endpoints: +# ## - http://myhaproxy.com:1936/haproxy?stats +# ## - https://myhaproxy.com:8000/stats +# ## - socket:/run/haproxy/admin.sock +# ## - /run/haproxy/*.sock +# ## - tcp://127.0.0.1:1936 +# ## +# ## Server addresses not starting with 'http://', 'https://', 'tcp://' will be +# ## treated as possible sockets. When specifying local socket, glob patterns are +# ## supported. +# servers = ["http://myhaproxy.com:1936/haproxy?stats"] +# +# ## By default, some of the fields are renamed from what haproxy calls them. +# ## Setting this option to true results in the plugin keeping the original +# ## field names. +# # keep_field_names = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Monitor disks' temperatures using hddtemp +# [[inputs.hddtemp]] +# ## By default, telegraf gathers temps data from all disks detected by the +# ## hddtemp. +# ## +# ## Only collect temps from the selected disks. +# ## +# ## A * as the device name will return the temperature values of all disks. +# ## +# # address = "127.0.0.1:7634" +# # devices = ["sda", "*"] + + +# # Read formatted metrics from one or more HTTP endpoints +# [[inputs.http]] +# ## One or more URLs from which to read formatted metrics +# urls = [ +# "http://localhost/metrics" +# ] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## HTTP entity-body to send with POST/PUT requests. +# # body = "" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Optional Bearer token settings to use for the API calls. +# ## Use either the token itself or the token file if you need a token. +# # token = "eyJhbGc...Qssw5c" +# # token_file = "/path/to/file" +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" +# +# ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. 
+# # client_id = "clientid"
+# # client_secret = "secret"
+# # token_url = "https://identityprovider/oauth2/v1/token"
+# # scopes = ["urn:opc:idm:__myscopes__"]
+#
+# ## HTTP Proxy support
+# # use_system_proxy = false
+# # http_proxy_url = ""
+#
+# ## Optional TLS Config
+# ## Set to true/false to enforce TLS being enabled/disabled. If not set,
+# ## enable TLS only if any of the other options are specified.
+# # tls_enable =
+# ## Trusted root certificates for server
+# # tls_ca = "/path/to/cafile"
+# ## Used for TLS client certificate authentication
+# # tls_cert = "/path/to/certfile"
+# ## Used for TLS client certificate authentication
+# # tls_key = "/path/to/keyfile"
+# ## Send the specified TLS server name via SNI
+# # tls_server_name = "kubernetes.example.com"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional Cookie authentication
+# # cookie_auth_url = "https://localhost/authMe"
+# # cookie_auth_method = "POST"
+# # cookie_auth_username = "username"
+# # cookie_auth_password = "pa$$word"
+# # cookie_auth_headers = { Content-Type = "application/json", X-MY-HEADER = "hello" }
+# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
+# ## If cookie_auth_renewal is not set or is set to "0", authentication happens once and the cookie is never renewed
+# # cookie_auth_renewal = "5m"
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## List of success status codes
+# # success_status_codes = [200]
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+#
+
+
+# # HTTP/HTTPS request given an address, a method and a timeout
+# [[inputs.http_response]]
+# ## List of urls to query.
+# # urls = ["http://localhost"]
+#
+# ## Set http_proxy.
+# ## Telegraf uses the system-wide proxy settings if it is not set.
+# # http_proxy = "http://localhost:8888"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## HTTP Request Method
+# # method = "GET"
+#
+# ## Whether to follow redirects from the server (defaults to false)
+# # follow_redirects = false
+#
+# ## Optional file with Bearer token
+# ## The file content is added as an Authorization header
+# # bearer_token = "/path/to/file"
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional HTTP Request Body
+# # body = '''
+# # {'fake':'data'}
+# # '''
+#
+# ## Optional name of the field that will contain the body of the response.
+# ## By default it is set to an empty string indicating that the body's
+# ## content won't be added
+# # response_body_field = ''
+#
+# ## Maximum allowed HTTP response body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# ## If the response body size exceeds this limit a "body_read_error" will
+# ## be raised.
+# # response_body_max_size = "32MiB"
+#
+# ## Optional substring or regex match in body of the response (case sensitive)
+# # response_string_match = "\"service_status\": \"up\""
+# # response_string_match = "ok"
+# # response_string_match = "\".*_status\".?:.?\"up\""
+#
+# ## Expected response status code.
+# ## The status code of the response is compared to this value. If they match,
+# ## the field "response_status_code_match" will be 1, otherwise it will be 0.
+# ## If the expected status code is 0, the check is disabled and the field +# ## won't be added. +# # response_status_code = 0 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# ## Use the given name as the SNI server name on each URL +# # tls_server_name = "" +# ## TLS renegotiation method, choose from "never", "once", "freely" +# # tls_renegotiation_method = "never" +# +# ## HTTP Request Headers (all values must be strings) +# # [inputs.http_response.headers] +# # Host = "github.com" +# +# ## Optional setting to map response http headers into tags +# ## If the http header is not present on the request, no corresponding tag will +# ## be added. If multiple instances of the http header are present, only the +# ## first value will be used. +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Interface to use when dialing an address +# # interface = "eth0" + + +# ## DEPRECATED: The "httpjson" plugin is deprecated in version 1.6.0 and will be removed in 1.30.0, use 'inputs.http' instead. +# # Read flattened metrics from one or more JSON HTTP endpoints +# [[inputs.httpjson]] +# ## NOTE This plugin only reads numerical measurements, strings and booleans +# ## will be ignored. +# +# ## Name for the service being polled. Will be appended to the name of the +# ## measurement e.g. "httpjson_webserver_stats". +# ## +# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. +# name = "webserver_stats" +# +# ## URL of each server in the service's cluster +# servers = [ +# "http://localhost:9999/stats/", +# "http://localhost:9998/stats/", +# ] +# ## Set response_timeout (default 5 seconds) +# response_timeout = "5s" +# +# ## HTTP method to use: GET or POST (case-sensitive) +# method = "GET" +# +# ## Tags to extract from top-level of JSON server response. +# # tag_keys = [ +# # "my_tag_1", +# # "my_tag_2" +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP Request Parameters (all values must be strings). For "GET" requests, data +# ## will be included in the query. For "POST" requests, data will be included +# ## in the request body as "x-www-form-urlencoded". +# # [inputs.httpjson.parameters] +# # event_type = "cpu_spike" +# # threshold = "0.75" +# +# ## HTTP Request Headers (all values must be strings). +# # [inputs.httpjson.headers] +# # X-Auth-Token = "my-xauth-token" +# # apiVersion = "v1" + + +# # Gathers huge pages measurements. +# # This plugin ONLY supports Linux +# [[inputs.hugepages]] +# ## Supported huge page types: +# ## - "root" - based on root huge page control directory: +# ## /sys/kernel/mm/hugepages +# ## - "per_node" - based on per NUMA node directories: +# ## /sys/devices/system/node/node[0-9]*/hugepages +# ## - "meminfo" - based on /proc/meminfo file +# # types = ["root", "per_node"] + + +# # Gather Icinga2 status +# [[inputs.icinga2]] +# ## Required Icinga2 server address +# # server = "https://localhost:5665" +# +# ## Collected Icinga2 objects ("services", "hosts") +# ## Specify at least one object to collect from /v1/objects endpoint. 
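+# ## ex: objects = ["services", "hosts"] would collect both object types listed above
+# ## (illustrative value; see the commented setting below)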
+# # objects = ["services"] +# +# ## Collect metrics from /v1/status endpoint +# ## Choose from: +# ## "ApiListener", "CIB", "IdoMysqlConnection", "IdoPgsqlConnection" +# # status = [] +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Gets counters from all InfiniBand cards and ports installed +# # This plugin ONLY supports Linux +# [[inputs.infiniband]] +# # no configuration + + +# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.influxdb]] +# ## Works with InfluxDB debug endpoints out of the box, +# ## but other services can use this format too. +# ## See the influxdb plugin's README for more details. +# +# ## Multiple URLs from which to read InfluxDB-formatted JSON +# ## Default is "http://localhost:8086/debug/vars". +# urls = [ +# "http://localhost:8086/debug/vars" +# ] +# +# ## Username and password to send using HTTP Basic Authentication. +# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## http request & header timeout +# timeout = "5s" + + +# # Intel Baseband Accelerator Input Plugin collects metrics from both dedicated and integrated +# # Intel devices that provide Wireless Baseband hardware acceleration. +# # This plugin ONLY supports Linux. +# [[inputs.intel_baseband]] +# ## Path to socket exposed by pf-bb-config for CLI interaction (mandatory). +# ## In version v23.03 of pf-bb-config the path is created according to the schema: +# ## "/tmp/pf_bb_config.0000\:\:..sock" where 0000\:\:. is the PCI device ID. +# socket_path = "" +# +# ## Path to log file exposed by pf-bb-config with telemetry to read (mandatory). +# ## In version v23.03 of pf-bb-config the path is created according to the schema: +# ## "/var/log/pf_bb_cfg_0000\:\:..log" where 0000\:\:. is the PCI device ID. +# log_file_path = "" +# +# ## Specifies plugin behavior regarding unreachable socket (which might not have been initialized yet). +# ## Available choices: +# ## - error: Telegraf will return an error on startup if socket is unreachable +# ## - ignore: Telegraf will ignore error regarding unreachable socket on both startup and gather +# # unreachable_socket_behavior = "error" +# +# ## Duration that defines how long the connected socket client will wait for +# ## a response before terminating connection. +# ## Since it's local socket access to a fast packet processing application, the timeout should +# ## be sufficient for most users. +# ## Setting the value to 0 disables the timeout (not recommended). +# # socket_access_timeout = "1s" +# +# ## Duration that defines maximum time plugin will wait for pf-bb-config to write telemetry to the log file. +# ## Timeout may differ depending on the environment. +# ## Must be equal or larger than 50ms. +# # wait_for_telemetry_timeout = "1s" + + +# ## Reads metrics from DPDK using v2 telemetry interface. +# ## This plugin ONLY supports Linux +# [[inputs.intel_dlb]] +# ## Path to DPDK telemetry socket. 
+# # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" +# +# ## Default eventdev command list, it gathers metrics from socket by given commands. +# ## Supported options: +# ## "/eventdev/dev_xstats", "/eventdev/port_xstats", +# ## "/eventdev/queue_xstats", "/eventdev/queue_links" +# # eventdev_commands = ["/eventdev/dev_xstats", "/eventdev/port_xstats", "/eventdev/queue_xstats", "/eventdev/queue_links"] +# +# ## Detect DLB devices based on device id. +# ## Currently, only supported and tested device id is `0x2710`. +# ## Configuration added to support forward compatibility. +# # dlb_device_types = ["0x2710"] +# +# ## Specifies plugin behavior regarding unreachable socket (which might not have been initialized yet). +# ## Available choices: +# ## - error: Telegraf will return an error on startup if socket is unreachable +# ## - ignore: Telegraf will ignore error regarding unreachable socket on both startup and gather +# # unreachable_socket_behavior = "error" + + +# # Intel Platform Monitoring Technology plugin exposes Intel PMT metrics available through the Intel PMT kernel space. +# # This plugin ONLY supports Linux. +# [[inputs.intel_pmt]] +# ## Filepath to PMT XML within local copies of XML files from PMT repository. +# ## The filepath should be absolute. +# spec = "/home/telegraf/Intel-PMT/xml/pmt.xml" +# +# ## Enable metrics by their datatype. +# ## See the Enabling Metrics section in README for more details. +# ## If empty, all metrics are enabled. +# ## When used, the alternative option samples_enabled should NOT be used. +# # datatypes_enabled = [] +# +# ## Enable metrics by their name. +# ## See the Enabling Metrics section in README for more details. +# ## If empty, all metrics are enabled. +# ## When used, the alternative option datatypes_enabled should NOT be used. +# # samples_enabled = [] + + +# # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) +# # and per-CPU metrics like temperature, power and utilization. +# # This plugin ONLY supports Linux +# [[inputs.intel_powerstat]] +# ## The user can choose which package metrics are monitored by the plugin with +# ## the package_metrics setting: +# ## - The default, will collect "current_power_consumption", +# ## "current_dram_power_consumption" and "thermal_design_power" +# ## - Leaving this setting empty means no package metrics will be collected +# ## - Finally, a user can specify individual metrics to capture from the +# ## supported options list +# ## Supported options: +# ## "current_power_consumption", "current_dram_power_consumption", +# ## "thermal_design_power", "max_turbo_frequency", "uncore_frequency", +# ## "cpu_base_frequency" +# # package_metrics = ["current_power_consumption", "current_dram_power_consumption", "thermal_design_power"] +# +# ## The user can choose which per-CPU metrics are monitored by the plugin in +# ## cpu_metrics array. +# ## Empty or missing array means no per-CPU specific metrics will be collected +# ## by the plugin. +# ## Supported options: +# ## "cpu_frequency", "cpu_c0_state_residency", "cpu_c1_state_residency", +# ## "cpu_c6_state_residency", "cpu_busy_cycles", "cpu_temperature", +# ## "cpu_busy_frequency" +# ## ATTENTION: cpu_busy_cycles is DEPRECATED - use cpu_c0_state_residency +# # cpu_metrics = [] + + +# # Collect statistics about itself +# [[inputs.internal]] +# ## If true, collect telegraf memory stats. +# # collect_memstats = true +# +# ## If true, collect metrics from Go's runtime.metrics. 
For a full list see: +# ## https://pkg.go.dev/runtime/metrics +# # collect_gostats = false + + +# # Monitors internet speed using speedtest.net service +# [[inputs.internet_speed]] +# ## This plugin downloads many MB of data each time it is run. As such +# ## consider setting a higher interval for this plugin to reduce the +# ## demand on your internet connection. +# # interval = "60m" +# +# ## Enable to reduce memory usage +# # memory_saving_mode = false +# +# ## Caches the closest server location +# # cache = false +# +# ## Number of concurrent connections +# ## By default or set to zero, the number of CPU cores is used. Use this to +# ## reduce the impact on system performance or to increase the connections on +# ## faster connections to ensure the fastest speed. +# # connections = 0 +# +# ## Test mode +# ## By default, a single sever is used for testing. This may work for most, +# ## however, setting to "multi" will reach out to multiple servers in an +# ## attempt to get closer to ideal internet speeds. +# # test_mode = "single" +# +# ## Server ID exclude filter +# ## Allows the user to exclude or include specific server IDs received by +# ## speedtest-go. Values in the exclude option will be skipped over. Values in +# ## the include option are the only options that will be picked from. +# ## +# ## See the list of servers speedtest-go will return at: +# ## https://www.speedtest.net/api/js/servers?engine=js&limit=10 +# ## +# # server_id_exclude = [] +# # server_id_include = [] + + +# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. +# [[inputs.interrupts]] +# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is +# ## stored as a field. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# # cpu_as_tag = false +# +# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. +# # [inputs.interrupts.tagdrop] +# # irq = [ "NET_RX", "TASKLET" ] + + +# # Read metrics from the bare metal servers via IPMI +# [[inputs.ipmi_sensor]] +# ## optionally specify the path to the ipmitool executable +# # path = "/usr/bin/ipmitool" +# ## +# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. +# ## Sudo must be configured to allow the telegraf user to run ipmitool +# ## without a password. +# # use_sudo = false +# ## +# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR +# # privilege = "ADMINISTRATOR" +# ## +# ## optionally specify one or more servers via a url matching +# ## [username[:password]@][protocol[(address)]] +# ## e.g. +# ## root:passwd@lan(127.0.0.1) +# ## +# ## if no servers are specified, local machine sensor stats will be queried +# ## +# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] +# +# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid +# ## gaps or overlap in pulled data +# interval = "30s" +# +# ## Timeout for the ipmitool command to complete. Default is 20 seconds. +# timeout = "20s" +# +# ## Schema Version: (Optional, defaults to version 1) +# metric_version = 2 +# +# ## Optionally provide the hex key for the IMPI connection. 
+# # hex_key = "" +# +# ## If ipmitool should use a cache +# ## for me ipmitool runs about 2 to 10 times faster with cache enabled on HP G10 servers (when using ubuntu20.04) +# ## the cache file may not work well for you if some sensors come up late +# # use_cache = false +# +# ## Path to the ipmitools cache file (defaults to OS temp dir) +# ## The provided path must exist and must be writable +# # cache_path = "" + + +# # Gather packets and bytes counters from Linux ipsets +# [[inputs.ipset]] +# ## By default, we only show sets which have already matched at least 1 packet. +# ## set include_unmatched_sets = true to gather them all. +# include_unmatched_sets = false +# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") +# ## You can avoid using sudo or root, by setting appropriate privileges for +# ## the telegraf.service systemd service. +# use_sudo = false +# ## The default timeout of 1s for ipset execution can be overridden here: +# # timeout = "1s" +# + + +# # Gather packets and bytes throughput from iptables +# # This plugin ONLY supports Linux +# [[inputs.iptables]] +# ## iptables require root access on most systems. +# ## Setting 'use_sudo' to true will make use of sudo to run iptables. +# ## Users must configure sudo to allow telegraf user to run iptables with +# ## no password. +# ## iptables can be restricted to only list command "iptables -nvL". +# use_sudo = false +# ## Setting 'use_lock' to true runs iptables with the "-w" option. +# ## Adjust your sudo settings appropriately if using this option +# ## ("iptables -w 5 -nvl") +# use_lock = false +# ## Define an alternate executable, such as "ip6tables". Default is "iptables". +# # binary = "ip6tables" +# ## defines the table to monitor: +# table = "filter" +# ## defines the chains to monitor. +# ## NOTE: iptables rules without a comment will not be monitored. +# ## Read the plugin documentation for more information. +# chains = [ "INPUT" ] + + +# # Collect virtual and real server stats from Linux IPVS +# # This plugin ONLY supports Linux +# [[inputs.ipvs]] +# # no configuration + + +# # Read jobs and cluster metrics from Jenkins instances +# [[inputs.jenkins]] +# ## The Jenkins URL in the format "schema://host:port" +# url = "http://my-jenkins-instance:8080" +# # username = "admin" +# # password = "admin" +# +# ## Set response_timeout +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Max Job Build Age filter +# ## Default 1 hour, ignore builds older than max_build_age +# # max_build_age = "1h" +# +# ## Optional Sub Job Depth filter +# ## Jenkins can have unlimited layer of sub jobs +# ## This config will limit the layers of pulling, default value 0 means +# ## unlimited pulling until no more sub jobs +# # max_subjob_depth = 0 +# +# ## Optional Sub Job Per Layer +# ## In workflow-multibranch-plugin, each branch will be created as a sub job. +# ## This config will limit to call only the lasted branches in each layer, +# ## empty will use default value 10 +# # max_subjob_per_layer = 10 +# +# ## Jobs to include or exclude from gathering +# ## When using both lists, job_exclude has priority. 
+# ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"]
+# # job_include = [ "*" ]
+# # job_exclude = [ ]
+#
+# ## Nodes to include or exclude from gathering
+# ## When using both lists, node_exclude has priority.
+# # node_include = [ "*" ]
+# # node_exclude = [ ]
+#
+# ## Worker pool for jenkins plugin only
+# ## Leave this field empty to use the default value of 5
+# # max_connections = 5
+#
+# ## When set to true, node labels are added as a comma-separated tag. If none
+# ## are found, then a tag with the value of 'none' is used. Finally, if a
+# ## label contains a comma it is replaced with an underscore.
+# # node_labels_as_tag = false
+
+
+# ## DEPRECATED: The "jolokia" plugin is deprecated in version 1.5.0 and will be removed in 1.30.0, use 'inputs.jolokia2' instead.
+# # Read JMX metrics through Jolokia
+# [[inputs.jolokia]]
+# ## This is the context root used to compose the jolokia url
+# ## NOTE that Jolokia requires a trailing slash at the end of the context root
+# context = "/jolokia/"
+#
+# ## This specifies the mode used
+# # mode = "proxy"
+# #
+# ## When in proxy mode this section is used to specify further
+# ## proxy address configurations.
+# ## Remember to change host address to fit your environment.
+# # [inputs.jolokia.proxy]
+# # host = "127.0.0.1"
+# # port = "8080"
+#
+# ## Optional http timeouts
+# ##
+# ## response_header_timeout, if non-zero, specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request.
+# # response_header_timeout = "3s"
+# ##
+# ## client_timeout specifies a time limit for requests made by this client.
+# ## Includes connection time, any redirects, and reading the response body.
+# # client_timeout = "4s"
+#
+# ## List of servers exposing jolokia read service
+# [[inputs.jolokia.servers]]
+# name = "as-server-01"
+# host = "127.0.0.1"
+# port = "8080"
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## List of metrics collected on above servers
+# ## Each metric consists of a name, a jmx path and either
+# ## a pass or drop slice attribute.
+# ## This collects all heap memory usage metrics.
+# [[inputs.jolokia.metrics]]
+# name = "heap_memory_usage"
+# mbean = "java.lang:type=Memory"
+# attribute = "HeapMemoryUsage"
+#
+# ## This collects thread count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "thread_count"
+# mbean = "java.lang:type=Threading"
+# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
+#
+# ## This collects loaded/unloaded class count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "class_count"
+# mbean = "java.lang:type=ClassLoading"
+# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
+
+
+# # Read JMX metrics from a Jolokia REST agent endpoint
+# [[inputs.jolokia2_agent]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# # Add agent URLs to query
+# urls = ["http://localhost:8080/jolokia"]
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional origin URL to include as a header in the request. Some endpoints
+# ## may reject an empty origin.
+# # origin = "" +# +# ## Optional TLS config +# # tls_ca = "/var/private/ca.pem" +# # tls_cert = "/var/private/client.pem" +# # tls_key = "/var/private/client-key.pem" +# # insecure_skip_verify = false +# +# ## Add metrics to read +# [[inputs.jolokia2_agent.metric]] +# name = "java_runtime" +# mbean = "java.lang:type=Runtime" +# paths = ["Uptime"] + + +# # Read JMX metrics from a Jolokia REST proxy endpoint +# [[inputs.jolokia2_proxy]] +# # default_tag_prefix = "" +# # default_field_prefix = "" +# # default_field_separator = "." +# +# ## Proxy agent +# url = "http://localhost:8080/jolokia" +# # username = "" +# # password = "" +# # response_timeout = "5s" +# +# ## Optional origin URL to include as a header in the request. Some endpoints +# ## may reject an empty origin. +# # origin = "" +# +# ## Optional TLS config +# # tls_ca = "/var/private/ca.pem" +# # tls_cert = "/var/private/client.pem" +# # tls_key = "/var/private/client-key.pem" +# # insecure_skip_verify = false +# +# ## Add proxy targets to query +# # default_target_username = "" +# # default_target_password = "" +# [[inputs.jolokia2_proxy.target]] +# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi" +# # username = "" +# # password = "" +# +# ## Add metrics to read +# [[inputs.jolokia2_proxy.metric]] +# name = "java_runtime" +# mbean = "java.lang:type=Runtime" +# paths = ["Uptime"] + + +# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.kapacitor]] +# ## Multiple URLs from which to read Kapacitor-formatted JSON +# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars". +# urls = [ +# "http://localhost:9092/kapacitor/v1/debug/vars" +# ] +# +# ## Time limit for http requests +# timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Get kernel statistics from /proc/vmstat +# # This plugin ONLY supports Linux +# [[inputs.kernel_vmstat]] +# # no configuration + + +# # Read status information from one or more Kibana servers +# [[inputs.kibana]] +# ## Specify a list of one or more Kibana servers +# servers = ["http://localhost:5601"] +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the Kubernetes api +# [[inputs.kube_inventory]] +# ## URL for the Kubernetes API. +# ## If empty in-cluster config with POD's service account token will be used. +# # url = "" +# +# ## Namespace to use. Set to "" to use all namespaces. +# # namespace = "default" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# ## +# ## Ignored if url is empty and in-cluster config is used. +# ## +# ## If both of these are empty, we'll use the default serviceaccount: +# ## at: /var/run/secrets/kubernetes.io/serviceaccount/token +# ## +# ## To auto-refresh the token, please use a file with the bearer_token option. +# ## If given a string, Telegraf cannot refresh the token periodically. 
+# # bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+# ## OR
+# ## deprecated in 1.24.0; use bearer_token with a file
+# # bearer_token_string = "abc_123"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional Resources to exclude from gathering
+# ## Leave this blank to try to gather everything available.
+# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress",
+# ## "nodes", "persistentvolumes", "persistentvolumeclaims", "pods", "services",
+# ## "statefulsets"
+# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## Optional Resources to include when gathering
+# ## Overrides resource_exclude if both set.
+# # resource_include = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## selectors to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all selectors as tags
+# ## selector_exclude overrides selector_include if both set.
+# # selector_include = []
+# # selector_exclude = ["*"]
+#
+# ## Optional TLS Config
+# ## Trusted root certificates for server
+# # tls_ca = "/path/to/cafile"
+# ## Used for TLS client certificate authentication
+# # tls_cert = "/path/to/certfile"
+# ## Used for TLS client certificate authentication
+# # tls_key = "/path/to/keyfile"
+# ## Send the specified TLS server name via SNI
+# # tls_server_name = "kubernetes.example.com"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Uncomment to remove deprecated metrics.
+# # fielddrop = ["terminated_reason"]
+
+
+# # Read metrics from the kubernetes kubelet api
+# [[inputs.kubernetes]]
+# ## URL for the kubelet, if empty read metrics from all nodes in the cluster
+# url = "http://127.0.0.1:10255"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /var/run/secrets/kubernetes.io/serviceaccount/token
+# ##
+# ## To re-read the token at each interval, please use a file with the
+# ## bearer_token option. If given a string, Telegraf will always use that
+# ## token.
+# # bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Pod labels to be added as tags. An empty array for both include and
+# ## exclude will include all labels.
+# # label_include = []
+# # label_exclude = ["*"]
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = /path/to/cafile
+# # tls_cert = /path/to/certfile
+# # tls_key = /path/to/keyfile
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from a LeoFS Server via SNMP
+# [[inputs.leofs]]
+# ## An array of URLs of the form:
+# ## host [ ":" port]
+# servers = ["127.0.0.1:4010"]
+
+
+# # The libvirt plugin collects statistics from virtualized guests using the libvirt virtualization API.
+# [[inputs.libvirt]]
+# ## Domain names from which libvirt gathers statistics.
+# ## By default (empty or missing array) the plugin gathers statistics from each domain registered in the host system.
+# # domains = []
+#
+# ## Libvirt connection URI with hypervisor.
+# ## The plugin supports multiple transport protocols and approaches which are configurable via the URI.
+# ## The general URI form: driver[+transport]://[username@][hostname][:port]/[path][?extraparameters] +# ## Supported transport protocols: ssh, tcp, tls, unix +# ## URI examples for each type of transport protocol: +# ## 1. SSH: qemu+ssh:///system?keyfile=/&known_hosts=/ +# ## 2. TCP: qemu+tcp:///system +# ## 3. TLS: qemu+tls:///system?pkipath=/certs_dir/ +# ## 4. UNIX: qemu+unix:///system?socket=/ +# ## Default URI is qemu:///system +# # libvirt_uri = "qemu:///system" +# +# ## Statistics groups for which libvirt plugin will gather statistics. +# ## Supported statistics groups: state, cpu_total, balloon, vcpu, interface, block, perf, iothread, memory, dirtyrate +# ## Empty array means no metrics for statistics groups will be exposed by the plugin. +# ## By default the plugin will gather all available statistics. +# # statistics_groups = ["state", "cpu_total", "balloon", "vcpu", "interface", "block", "perf", "iothread", "memory", "dirtyrate"] +# +# ## A list containing additional statistics to be exposed by libvirt plugin. +# ## Supported additional statistics: vcpu_mapping +# ## By default (empty or missing array) the plugin will not collect additional statistics. +# # additional_statistics = [] +# + + +# # Provides Linux CPU metrics +# # This plugin ONLY supports Linux +# [[inputs.linux_cpu]] +# ## Path for sysfs filesystem. +# ## See https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt +# ## Defaults: +# # host_sys = "/sys" +# +# ## CPU metrics collected by the plugin. +# ## Supported options: +# ## "cpufreq", "thermal" +# ## Defaults: +# # metrics = ["cpufreq"] + + +# # Provides Linux sysctl fs metrics +# [[inputs.linux_sysctl_fs]] +# # no configuration + + +# # Read metrics exposed by Logstash +# [[inputs.logstash]] +# ## The URL of the exposed Logstash API endpoint. +# url = "http://127.0.0.1:9600" +# +# ## Use Logstash 5 single pipeline API, set to true when monitoring +# ## Logstash 5. +# # single_pipeline = false +# +# ## Enable optional collection components. Can contain +# ## "pipelines", "process", and "jvm". +# # collect = ["pipelines", "process", "jvm"] +# +# ## Timeout for HTTP requests. +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials. +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Use TLS but skip chain & host verification. +# # insecure_skip_verify = false +# +# ## Optional HTTP headers. +# # [inputs.logstash.headers] +# # "X-Special-Header" = "Special-Value" + + +# # Read metrics from local Lustre service on OST, MDS +# # This plugin ONLY supports Linux +# [[inputs.lustre2]] +# ## An array of /proc globs to search for Lustre stats +# ## If not specified, the default will work on Lustre 2.5.x +# ## +# # ost_procfiles = [ +# # "/proc/fs/lustre/obdfilter/*/stats", +# # "/proc/fs/lustre/osd-ldiskfs/*/stats", +# # "/proc/fs/lustre/obdfilter/*/job_stats", +# # "/proc/fs/lustre/obdfilter/*/exports/*/stats", +# # ] +# # mds_procfiles = [ +# # "/proc/fs/lustre/mdt/*/md_stats", +# # "/proc/fs/lustre/mdt/*/job_stats", +# # "/proc/fs/lustre/mdt/*/exports/*/stats", +# # ] + + +# # Read metrics about LVM physical volumes, volume groups, logical volumes. 
+# [[inputs.lvm]] +# ## Use sudo to run LVM commands +# use_sudo = false +# +# ## The default location of the pvs binary can be overridden with: +# #pvs_binary = "/usr/sbin/pvs" +# +# ## The default location of the vgs binary can be overridden with: +# #vgs_binary = "/usr/sbin/vgs" +# +# ## The default location of the lvs binary can be overridden with: +# #lvs_binary = "/usr/sbin/lvs" + + +# # Gathers metrics from the /3.0/reports MailChimp API +# [[inputs.mailchimp]] +# ## MailChimp API key +# ## get from https://admin.mailchimp.com/account/api/ +# api_key = "" # required +# +# ## Reports for campaigns sent more than days_old ago will not be collected. +# ## 0 means collect all and is the default value. +# days_old = 0 +# +# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old +# # campaign_id = "" + + +# # Retrieves information on a specific host in a MarkLogic Cluster +# [[inputs.marklogic]] +# ## Base URL of the MarkLogic HTTP Server. +# url = "http://localhost:8002" +# +# ## List of specific hostnames to retrieve information. At least (1) required. +# # hosts = ["hostname1", "hostname2"] +# +# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges +# # username = "myuser" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many mcrouter servers. +# [[inputs.mcrouter]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. +# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" + + +# # Get kernel statistics from /proc/mdstat +# # This plugin ONLY supports Linux +# [[inputs.mdstat]] +# ## Sets file path +# ## If not specified, then default is /proc/mdstat +# # file_name = "/proc/mdstat" + + +# # Read metrics from one or many memcached servers. +# [[inputs.memcached]] +# # An array of address to gather stats about. Specify an ip on hostname +# # with optional port. ie localhost, 10.0.0.1:11211, etc. +# servers = ["localhost:11211"] +# # An array of unix memcached sockets to gather stats about. +# # unix_sockets = ["/var/run/memcached.sock"] +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true + + +# # Telegraf plugin for gathering metrics from N Mesos masters +# [[inputs.mesos]] +# ## Timeout, in ms. +# timeout = 100 +# +# ## A list of Mesos masters. +# masters = ["http://localhost:5050"] +# +# ## Master metrics groups to be collected, by default, all enabled. +# master_collections = [ +# "resources", +# "master", +# "system", +# "agents", +# "frameworks", +# "framework_offers", +# "tasks", +# "messages", +# "evqueue", +# "registrar", +# "allocator", +# ] +# +# ## A list of Mesos slaves, default is [] +# # slaves = [] +# +# ## Slave metrics groups to be collected, by default, all enabled. 
+# # slave_collections = [ +# # "resources", +# # "agent", +# # "system", +# # "executors", +# # "tasks", +# # "messages", +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collects scores from a Minecraft server's scoreboard using the RCON protocol +# [[inputs.minecraft]] +# ## Address of the Minecraft server. +# # server = "localhost" +# +# ## Server RCON Port. +# # port = "25575" +# +# ## Server RCON Password. +# password = "" +# +# ## Uncomment to remove deprecated metric components. +# # tagdrop = ["server"] + + +# # Generate metrics for test and demonstration purposes +# [[inputs.mock]] +# ## Set the metric name to use for reporting +# metric_name = "mock" +# +# ## Optional string key-value pairs of tags to add to all metrics +# # [inputs.mock.tags] +# # "key" = "value" +# +# ## One or more mock data fields *must* be defined. +# # [[inputs.mock.constant]] +# # name = "constant" +# # value = value_of_any_type +# # [[inputs.mock.random]] +# # name = "rand" +# # min = 1.0 +# # max = 6.0 +# # [[inputs.mock.sine_wave]] +# # name = "wave" +# # amplitude = 1.0 +# # period = 0.5 +# # [[inputs.mock.step]] +# # name = "plus_one" +# # start = 0.0 +# # step = 1.0 +# # [[inputs.mock.stock]] +# # name = "abc" +# # price = 50.00 +# # volatility = 0.2 + + +# # Retrieve data from MODBUS slave devices +# [[inputs.modbus]] +# ## Connection Configuration +# ## +# ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or +# ## via serial line communication in binary (RTU) or readable (ASCII) encoding +# ## +# ## Device name +# name = "Device" +# +# ## Slave ID - addresses a MODBUS device on the bus +# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] +# slave_id = 1 +# +# ## Timeout for each request +# timeout = "1s" +# +# ## Maximum number of retries and the time to wait between retries +# ## when a slave-device is busy. +# # busy_retries = 0 +# # busy_retries_wait = "100ms" +# +# # TCP - connect via Modbus/TCP +# controller = "tcp://localhost:502" +# +# ## Serial (RS485; RS232) +# ## For RS485 specific setting check the end of the configuration. +# ## For unix-like operating systems use: +# # controller = "file:///dev/ttyUSB0" +# ## For Windows operating systems use: +# # controller = "COM1" +# # baud_rate = 9600 +# # data_bits = 8 +# # parity = "N" +# # stop_bits = 1 +# +# ## Transmission mode for Modbus packets depending on the controller type. +# ## For Modbus over TCP you can choose between "TCP" , "RTUoverTCP" and +# ## "ASCIIoverTCP". +# ## For Serial controllers you can choose between "RTU" and "ASCII". +# ## By default this is set to "auto" selecting "TCP" for ModbusTCP connections +# ## and "RTU" for serial connections. +# # transmission_mode = "auto" +# +# ## Trace the connection to the modbus device as debug messages +# ## Note: You have to enable telegraf's debug mode to see those messages! 
+# # debug_connection = false +# +# ## Define the configuration schema +# ## |---register -- define fields per register type in the original style (only supports one slave ID) +# ## |---request -- define fields on a requests base +# ## |---metric -- define fields on a metric base +# configuration_type = "register" +# ## --- "register" configuration style --- +# +# ## Measurements +# ## +# +# ## Digital Variables, Discrete Inputs and Coils +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## data_type - the (optional) output type, can be BOOL or UINT16 (default) +# ## address - variable address +# +# discrete_inputs = [ +# { name = "start", address = [0]}, +# { name = "stop", address = [1]}, +# { name = "reset", address = [2]}, +# { name = "emergency_stop", address = [3]}, +# ] +# coils = [ +# { name = "motor1_run", address = [0]}, +# { name = "motor1_jog", address = [1]}, +# { name = "motor1_stop", address = [2]}, +# ] +# +# ## Analog Variables, Input Registers and Holding Registers +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## byte_order - the ordering of bytes +# ## |---AB, ABCD - Big Endian +# ## |---BA, DCBA - Little Endian +# ## |---BADC - Mid-Big Endian +# ## |---CDAB - Mid-Little Endian +# ## data_type - INT8L, INT8H, UINT8L, UINT8H (low and high byte variants) +# ## INT16, UINT16, INT32, UINT32, INT64, UINT64, +# ## FLOAT16-IEEE, FLOAT32-IEEE, FLOAT64-IEEE (IEEE 754 binary representation) +# ## FIXED, UFIXED (fixed-point representation on input) +# ## FLOAT32 is a deprecated alias for UFIXED for historic reasons, should be avoided +# ## scale - the final numeric variable representation +# ## address - variable address +# +# holding_registers = [ +# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, +# { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]}, +# { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]}, +# { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, +# { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, +# { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]}, +# ] +# input_registers = [ +# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, +# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, +# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, +# ] +# +# ## --- "request" configuration style --- +# +# ## Per request definition +# ## +# +# ## Define a request sent to the device +# ## Multiple of those requests can be defined. Data will be collated into metrics at the end of data collection. +# [[inputs.modbus.request]] +# ## ID of the modbus slave device to query. +# ## If you need to query multiple slave-devices, create several "request" definitions. +# slave_id = 1 +# +# ## Byte order of the data. +# ## |---ABCD -- Big Endian (Motorola) +# ## |---DCBA -- Little Endian (Intel) +# ## |---BADC -- Big Endian with byte swap +# ## |---CDAB -- Little Endian with byte swap +# byte_order = "ABCD" +# +# ## Type of the register for the request +# ## Can be "coil", "discrete", "holding" or "input" +# register = "coil" +# +# ## Name of the measurement. +# ## Can be overriden by the individual field definitions. 
Defaults to "modbus"
+# # measurement = "modbus"
+#
+# ## Request optimization algorithm.
+# ## |---none -- Do not perform any optimization and use the given layout (default)
+# ## |---shrink -- Shrink requests to actually requested fields
+# ## | by stripping leading and trailing omits
+# ## |---rearrange -- Rearrange request boundaries within consecutive address ranges
+# ## | to reduce the number of requested registers by keeping
+# ## | the number of requests.
+# ## |---max_insert -- Rearrange requests keeping the number of extra fields below the value
+# ## provided in "optimization_max_register_fill". It is not necessary to define 'omitted'
+# ## fields as the optimization will add such fields only where needed.
+# # optimization = "none"
+#
+# ## Maximum number of registers the optimizer is allowed to insert between two fields to
+# ## save requests.
+# ## This option is only used for the 'max_insert' optimization strategy.
+# ## NOTE: All omitted fields are ignored, so this option denotes the effective hole
+# ## size to fill.
+# # optimization_max_register_fill = 50
+#
+# ## Field definitions
+# ## Analog Variables, Input Registers and Holding Registers
+# ## address - address of the register to query. For coil and discrete inputs this is the bit address.
+# ## name *1 - field name
+# ## type *1,2 - type of the modbus field, can be
+# ## INT8L, INT8H, UINT8L, UINT8H (low and high byte variants)
+# ## INT16, UINT16, INT32, UINT32, INT64, UINT64 and
+# ## FLOAT16, FLOAT32, FLOAT64 (IEEE 754 binary representation)
+# ## scale *1,2 - (optional) factor to scale the variable with
+# ## output *1,3 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if
+# ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc).
+# ## measurement *1 - (optional) measurement name, defaults to the setting of the request
+# ## omit - (optional) omit this field. Useful to leave out single values when querying many registers
+# ## with a single request. Defaults to "false".
+# ##
+# ## *1: These fields are ignored if field is omitted ("omit"=true)
+# ## *2: These fields are ignored for both "coil" and "discrete"-input type of registers.
+# ## *3: This field can only be "UINT16" or "BOOL" if specified for both "coil"
+# ## and "discrete"-input type of registers. By default the fields are
+# ## output as zero or one in UINT16 format unless "BOOL" is used.
+# +# ## Coil / discrete input example +# fields = [ +# { address=0, name="motor1_run"}, +# { address=1, name="jog", measurement="motor"}, +# { address=2, name="motor1_stop", omit=true}, +# { address=3, name="motor1_overheating", output="BOOL"}, +# ] +# +# [inputs.modbus.request.tags] +# machine = "impresser" +# location = "main building" +# +# [[inputs.modbus.request]] +# ## Holding example +# ## All of those examples will result in FLOAT64 field outputs +# slave_id = 1 +# byte_order = "DCBA" +# register = "holding" +# fields = [ +# { address=0, name="voltage", type="INT16", scale=0.1 }, +# { address=1, name="current", type="INT32", scale=0.001 }, +# { address=3, name="power", type="UINT32", omit=true }, +# { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" }, +# { address=7, name="frequency", type="UINT32", scale=0.1 }, +# { address=8, name="power_factor", type="INT64", scale=0.01 }, +# ] +# +# [inputs.modbus.request.tags] +# machine = "impresser" +# location = "main building" +# +# [[inputs.modbus.request]] +# ## Input example with type conversions +# slave_id = 1 +# byte_order = "ABCD" +# register = "input" +# fields = [ +# { address=0, name="rpm", type="INT16" }, # will result in INT64 field +# { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field +# { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field +# { address=4, name="hours", type="UINT32" }, # will result in UIN64 field +# ] +# +# [inputs.modbus.request.tags] +# machine = "impresser" +# location = "main building" +# +# ## --- "metric" configuration style --- +# +# ## Per metric definition +# ## +# +# ## Request optimization algorithm across metrics +# ## |---none -- Do not perform any optimization and just group requests +# ## | within metrics (default) +# ## |---max_insert -- Collate registers across all defined metrics and fill in +# ## holes to optimize the number of requests. +# # optimization = "none" +# +# ## Maximum number of registers the optimizer is allowed to insert between +# ## non-consecutive registers to save requests. +# ## This option is only used for the 'max_insert' optimization strategy and +# ## effectively denotes the hole size between registers to fill. +# # optimization_max_register_fill = 50 +# +# ## Define a metric produced by the requests to the device +# ## Multiple of those metrics can be defined. The referenced registers will +# ## be collated into requests send to the device +# [[inputs.modbus.metric]] +# ## ID of the modbus slave device to query +# ## If you need to query multiple slave-devices, create several "metric" definitions. +# slave_id = 1 +# +# ## Byte order of the data +# ## |---ABCD -- Big Endian (Motorola) +# ## |---DCBA -- Little Endian (Intel) +# ## |---BADC -- Big Endian with byte swap +# ## |---CDAB -- Little Endian with byte swap +# # byte_order = "ABCD" +# +# ## Name of the measurement +# # measurement = "modbus" +# +# ## Field definitions +# ## register - type of the modbus register, can be "coil", "discrete", +# ## "holding" or "input". Defaults to "holding". +# ## address - address of the register to query. For coil and discrete inputs this is the bit address. 
+# ## name - field name +# ## type *1 - type of the modbus field, can be +# ## INT8L, INT8H, UINT8L, UINT8H (low and high byte variants) +# ## INT16, UINT16, INT32, UINT32, INT64, UINT64 and +# ## FLOAT16, FLOAT32, FLOAT64 (IEEE 754 binary representation) +# ## scale *1 - (optional) factor to scale the variable with +# ## output *2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if +# ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). +# ## +# ## *1: These fields are ignored for both "coil" and "discrete"-input type of registers. +# ## *2: This field can only be "UINT16" or "BOOL" if specified for both "coil" +# ## and "discrete"-input type of registers. By default the fields are +# ## output as zero or one in UINT16 format unless "BOOL" is used. +# fields = [ +# { register="coil", address=0, name="door_open"}, +# { register="coil", address=1, name="status_ok"}, +# { register="holding", address=0, name="voltage", type="INT16" }, +# { address=1, name="current", type="INT32", scale=0.001 }, +# { address=5, name="energy", type="FLOAT32", scale=0.001,}, +# { address=7, name="frequency", type="UINT32", scale=0.1 }, +# { address=8, name="power_factor", type="INT64", scale=0.01 }, +# ] +# +# ## Tags assigned to the metric +# # [inputs.modbus.metric.tags] +# # machine = "impresser" +# # location = "main building" +# +# +# ## RS485 specific settings. Only take effect for serial controllers. +# ## Note: This has to be at the end of the modbus configuration due to +# ## TOML constraints. +# # [inputs.modbus.rs485] +# ## Delay RTS prior to sending +# # delay_rts_before_send = "0ms" +# ## Delay RTS after to sending +# # delay_rts_after_send = "0ms" +# ## Pull RTS line to high during sending +# # rts_high_during_send = false +# ## Pull RTS line to high after sending +# # rts_high_after_send = false +# ## Enabling receiving (Rx) during transmission (Tx) +# # rx_during_tx = false +# +# ## Enable workarounds required by some devices to work correctly +# # [inputs.modbus.workarounds] +# ## Pause after connect delays the first request by the specified time. +# ## This might be necessary for (slow) devices. +# # pause_after_connect = "0ms" +# +# ## Pause between read requests sent to the device. +# ## This might be necessary for (slow) serial devices. +# # pause_between_requests = "0ms" +# +# ## Close the connection after every gather cycle. +# ## Usually the plugin closes the connection after a certain idle-timeout, +# ## however, if you query a device with limited simultaneous connectivity +# ## (e.g. serial devices) from multiple instances you might want to only +# ## stay connected during gather and disconnect afterwards. +# # close_connection_after_gather = false +# +# ## Force the plugin to read each field in a separate request. +# ## This might be necessary for devices not conforming to the spec, +# ## see https://github.com/influxdata/telegraf/issues/12071. +# # one_request_per_field = false +# +# ## Enforce the starting address to be zero for the first request on +# ## coil registers. 
This is necessary for some devices see +# ## https://github.com/influxdata/telegraf/issues/8905 + + +# # Read metrics and status information about processes managed by Monit +# [[inputs.monit]] +# ## Monit HTTPD address +# address = "http://127.0.0.1:2812" +# +# ## Username and Password for Monit +# # username = "" +# # password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Aggregates the contents of multiple files into a single point +# [[inputs.multifile]] +# ## Base directory where telegraf will look for files. +# ## Omit this option to use absolute paths. +# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" +# +# ## If true discard all data when a single file can't be read. +# ## Else, Telegraf omits the field generated from this file. +# # fail_early = true +# +# ## Files to parse each interval. +# [[inputs.multifile.file]] +# file = "in_pressure_input" +# dest = "pressure" +# conversion = "float" +# [[inputs.multifile.file]] +# file = "in_temp_input" +# dest = "temperature" +# conversion = "float(3)" +# [[inputs.multifile.file]] +# file = "in_humidityrelative_input" +# dest = "humidityrelative" +# conversion = "float(3)" + + +# # Read metrics from one or many mysql servers +# [[inputs.mysql]] +# ## specify servers via a url matching: +# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] +# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name +# ## e.g. +# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] +# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] +# # +# ## If no servers are specified, then localhost is used as the host. +# servers = ["tcp(127.0.0.1:3306)/"] +# +# ## Selects the metric output format. +# ## +# ## This option exists to maintain backwards compatibility, if you have +# ## existing metrics do not set or change this value until you are ready to +# ## migrate to the new format. +# ## +# ## If you do not have existing metrics from this plugin set to the latest +# ## version. 
+# ## +# ## Telegraf >=1.6: metric_version = 2 +# ## <1.6: metric_version = 1 (or unset) +# metric_version = 2 +# +# ## if the list is empty, then metrics are gathered from all database tables +# # table_schema_databases = [] +# +# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided +# ## in the list above +# # gather_table_schema = false +# +# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST +# # gather_process_list = false +# +# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS +# # gather_user_statistics = false +# +# ## gather auto_increment columns and max values from information schema +# # gather_info_schema_auto_inc = false +# +# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS +# # gather_innodb_metrics = false +# +# ## gather metrics from all channels from SHOW SLAVE STATUS command output +# # gather_all_slave_channels = false +# +# ## gather metrics from SHOW SLAVE STATUS command output +# # gather_slave_status = false +# +# ## use SHOW ALL SLAVES STATUS command output for MariaDB +# # mariadb_dialect = false +# +# ## gather metrics from SHOW BINARY LOGS command output +# # gather_binary_logs = false +# +# ## gather metrics from SHOW GLOBAL VARIABLES command output +# # gather_global_variables = true +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE +# # gather_table_io_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS +# # gather_table_lock_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE +# # gather_index_io_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS +# # gather_event_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME +# # gather_file_events_stats = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST +# # gather_perf_events_statements = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME +# # gather_perf_sum_per_acc_per_event = false +# # +# ## list of events to be gathered for gather_perf_sum_per_acc_per_event +# ## in case of empty list all events will be gathered +# # perf_summary_events = [] +# +# ## the limits for metrics form perf_events_statements +# # perf_events_statements_digest_text_limit = 120 +# # perf_events_statements_limit = 250 +# # perf_events_statements_time_limit = 86400 +# +# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) +# ## example: interval_slow = "30m" +# # interval_slow = "" +# +# ## Optional TLS Config (used if tls=custom parameter specified in server uri) +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Provides metrics about the state of a NATS server +# # This plugin does NOT support FreeBSD +# [[inputs.nats]] +# ## The address of the monitoring endpoint of the NATS server +# server = "http://localhost:8222" +# +# ## Maximum time to receive response +# # response_timeout = "5s" + + +# # Neptune Apex data collector +# [[inputs.neptune_apex]] +# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. +# ## Measurements will be logged under "apex". +# +# ## The base URL of the local Apex(es). If you specify more than one server, they will +# ## be differentiated by the "source" tag. 
+# servers = [
+# "http://apex.local",
+# ]
+#
+# ## The response_timeout specifies how long to wait for a reply from the Apex.
+# #response_timeout = "5s"
+#
+
+
+# # Gather metrics about network interfaces
+# [[inputs.net]]
+# ## By default, telegraf gathers stats from any up interface (excluding loopback)
+# ## Setting interfaces will tell it to gather these explicit interfaces,
+# ## regardless of status. When specifying an interface, glob-style
+# ## patterns are also supported.
+# ##
+# # interfaces = ["eth*", "enp0s[0-1]", "lo"]
+# ##
+# ## On Linux systems telegraf also collects protocol stats.
+# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
+# ##
+# ## DEPRECATION NOTICE: A value of 'false' is deprecated and discouraged!
+# ## Please set this to `true` and use the 'inputs.nstat'
+# ## plugin instead.
+# # ignore_protocol_stats = false
+
+
+# # Collect response time of a TCP or UDP connection
+# [[inputs.net_response]]
+# ## Protocol, must be "tcp" or "udp"
+# ## NOTE: because the "udp" protocol does not respond to requests, it requires
+# ## a send/expect string pair (see below).
+# protocol = "tcp"
+# ## Server address (default localhost)
+# address = "localhost:80"
+#
+# ## Set timeout
+# # timeout = "1s"
+#
+# ## Set read timeout (only used if expecting a response)
+# # read_timeout = "1s"
+#
+# ## The following options are required for UDP checks. For TCP, they are
+# ## optional. The plugin will send the given string to the server and then
+# ## expect to receive the given 'expect' string back.
+# ## string sent to the server
+# # send = "ssh"
+# ## expected string in answer
+# # expect = "ssh"
+#
+# ## Uncomment to remove deprecated fields; recommended for new deploys
+# # fielddrop = ["result_type", "string_found"]
+
+
+# # Read TCP metrics such as established, time wait and sockets counts.
+# [[inputs.netstat]]
+# # no configuration
+
+
+# # Read per-mount NFS client metrics from /proc/self/mountstats
+# [[inputs.nfsclient]]
+# ## Read more low-level metrics (optional, defaults to false)
+# # fullstat = false
+#
+# ## List of mounts to explicitly include or exclude (optional)
+# ## The pattern (Go regexp) is matched against the mount point (not the
+# ## device being mounted). If include_mounts is set, all mounts are ignored
+# ## unless present in the list. If a mount is listed in both include_mounts
+# ## and exclude_mounts, it is excluded. Go regexp patterns can be used.
+# # include_mounts = []
+# # exclude_mounts = []
+#
+# ## List of operations to include or exclude from collecting. This applies
+# ## only when fullstat=true. Semantics are similar to {include,exclude}_mounts:
+# ## the default is to collect everything; when include_operations is set, only
+# ## those OPs are collected; when exclude_operations is set, all are collected
+# ## except those listed. If include and exclude are set, the OP is excluded.
+# ## See /proc/self/mountstats for a list of valid operations; note that
+# ## NFSv3 and NFSv4 have different lists. While it is not possible to
+# ## have different include/exclude lists for NFSv3/4, unused elements
+# ## in the list should be okay. It is possible to have different lists
+# ## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas,
+# ## with their own lists. See "include_mounts" above, and be careful of
+# ## duplicate metrics.
+# # include_operations = [] +# # exclude_operations = [] + + +# # Read Nginx's basic status information (ngx_http_stub_status_module) +# [[inputs.nginx]] +# ## An array of Nginx stub_status URI to gather stats. +# urls = ["http://localhost/server_status"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Read Nginx Plus' advanced status information +# [[inputs.nginx_plus]] +# ## An array of Nginx status URIs to gather stats. +# urls = ["http://localhost/status"] +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx Plus API advanced status information +# [[inputs.nginx_plus_api]] +# ## An array of Nginx API URIs to gather stats. +# urls = ["http://localhost/api"] +# # Nginx API version, default: 3 +# # api_version = 3 +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-sts) +# [[inputs.nginx_sts]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +# [[inputs.nginx_upstream_check]] +# ## An URL where Nginx Upstream check module is enabled +# ## It should be set to return a JSON formatted response +# url = "http://127.0.0.1/status?format=json" +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "check.example.com" +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-vts) +# [[inputs.nginx_vts]] +# ## An array of ngx_http_status_module or status URI to gather stats. 
+# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the Nomad API +# [[inputs.nomad]] +# ## URL for the Nomad agent +# # url = "http://127.0.0.1:4646" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + +# # A plugin to collect stats from the NSD DNS resolver +# [[inputs.nsd]] +# ## Address of server to connect to, optionally ':port'. Defaults to the +# ## address in the nsd config file. +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the nsd-control binary can be overridden with: +# # binary = "/usr/sbin/nsd-control" +# +# ## The default location of the nsd config file can be overridden with: +# # config_file = "/etc/nsd/nsd.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" + + +# # Read NSQ topic and channel statistics. +# [[inputs.nsq]] +# ## An array of NSQD HTTP API endpoints +# endpoints = ["http://localhost:4151"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collect kernel snmp counters and network interface statistics +# [[inputs.nstat]] +# ## file paths for proc files. If empty default paths will be used: +# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 +# ## These can also be overridden with env variables, see README. +# proc_net_netstat = "/proc/net/netstat" +# proc_net_snmp = "/proc/net/snmp" +# proc_net_snmp6 = "/proc/net/snmp6" +# ## dump metrics with 0 values too +# dump_zeros = true + + +# # Get standard NTP query metrics, requires ntpq executable. +# [[inputs.ntpq]] +# ## Servers to query with ntpq. +# ## If no server is given, the local machine is queried. +# # servers = [] +# +# ## If false, set the -n ntpq flag. Can reduce metric gather time. +# ## DEPRECATED since 1.24.0: add '-n' to 'options' instead to skip DNS lookup +# # dns_lookup = true +# +# ## Options to pass to the ntpq command. +# # options = "-p" +# +# ## Output format for the 'reach' field. +# ## Available values are +# ## octal -- output as is in octal representation e.g. 377 (default) +# ## decimal -- convert value to decimal representation e.g. 371 -> 249 +# ## count -- count the number of bits in the value. This represents +# ## the number of successful reaches, e.g. 37 -> 5 +# ## ratio -- output the ratio of successful attempts e.g. 
37 -> 5/8 = 0.625 +# # reach_format = "octal" + + +# # Pulls statistics from nvidia GPUs attached to the host +# [[inputs.nvidia_smi]] +# ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" +# ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), +# ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned +# # bin_path = "/usr/bin/nvidia-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + +# # Retrieve data from OPCUA devices +# [[inputs.opcua]] +# ## Metric name +# # name = "opcua" +# # +# ## OPC UA Endpoint URL +# # endpoint = "opc.tcp://localhost:4840" +# # +# ## Maximum time allowed to establish a connect to the endpoint. +# # connect_timeout = "10s" +# # +# ## Maximum time allowed for a request over the established connection. +# # request_timeout = "5s" +# # +# ## Security policy, one of "None", "Basic128Rsa15", "Basic256", +# ## "Basic256Sha256", or "auto" +# # security_policy = "auto" +# # +# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto" +# # security_mode = "auto" +# # +# ## Path to cert.pem. Required when security mode or policy isn't "None". +# ## If cert path is not supplied, self-signed cert and key will be generated. +# # certificate = "/etc/telegraf/cert.pem" +# # +# ## Path to private key.pem. Required when security mode or policy isn't "None". +# ## If key path is not supplied, self-signed cert and key will be generated. +# # private_key = "/etc/telegraf/key.pem" +# # +# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To +# ## authenticate using a specific ID, select 'Certificate' or 'UserName' +# # auth_method = "Anonymous" +# # +# ## Username. Required for auth_method = "UserName" +# # username = "" +# # +# ## Password. Required for auth_method = "UserName" +# # password = "" +# # +# ## Option to select the metric timestamp to use. Valid options are: +# ## "gather" -- uses the time of receiving the data in telegraf +# ## "server" -- uses the timestamp provided by the server +# ## "source" -- uses the timestamp provided by the source +# # timestamp = "gather" +# # +# ## Node ID configuration +# ## name - field name to use in the output +# ## namespace - OPC UA namespace of the node (integer value 0 thru 3) +# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) +# ## identifier - OPC UA ID (tag as shown in opcua browser) +# ## tags - extra tags to be added to the output metric (optional); deprecated in 1.25.0; use default_tags +# ## default_tags - extra tags to be added to the output metric (optional) +# ## +# ## Use either the inline notation or the bracketed notation, not both. +# # +# ## Inline notation (default_tags not supported yet) +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier="", tags=[["tag1", "value1"], ["tag2", "value2"]}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# # ] +# # +# ## Bracketed notation +# # [[inputs.opcua.nodes]] +# # name = "node1" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# # default_tags = { tag1 = "value1", tag2 = "value2" } +# # +# # [[inputs.opcua.nodes]] +# # name = "node2" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# # +# ## Node Group +# ## Sets defaults so they aren't required in every node. 
+# ## Default values can be set for: +# ## * Metric name +# ## * OPC UA namespace +# ## * Identifier +# ## * Default tags +# ## +# ## Multiple node groups are allowed +# #[[inputs.opcua.group]] +# ## Group Metric name. Overrides the top level name. If unset, the +# ## top level name is used. +# # name = +# # +# ## Group default namespace. If a node in the group doesn't set its +# ## namespace, this is used. +# # namespace = +# # +# ## Group default identifier type. If a node in the group doesn't set its +# ## namespace, this is used. +# # identifier_type = +# # +# ## Default tags that are applied to every node in this group. Can be +# ## overwritten in a node by setting a different value for the tag name. +# ## example: default_tags = { tag1 = "value1" } +# # default_tags = {} +# # +# ## Node ID Configuration. Array of nodes with the same settings as above. +# ## Use either the inline notation or the bracketed notation, not both. +# # +# ## Inline notation (default_tags not supported yet) +# # nodes = [ +# # {name="node1", namespace="", identifier_type="", identifier=""}, +# # {name="node2", namespace="", identifier_type="", identifier=""}, +# #] +# # +# ## Bracketed notation +# # [[inputs.opcua.group.nodes]] +# # name = "node1" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# # default_tags = { tag1 = "override1", tag2 = "value2" } +# # +# # [[inputs.opcua.group.nodes]] +# # name = "node2" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# +# ## Enable workarounds required by some devices to work correctly +# # [inputs.opcua.workarounds] +# ## Set additional valid status codes, StatusOK (0x0) is always considered valid +# # additional_valid_status_codes = ["0xC0"] +# +# # [inputs.opcua.request_workarounds] +# ## Use unregistered reads instead of registered reads +# # use_unregistered_reads = false + + +# # OpenLDAP cn=Monitor plugin +# [[inputs.openldap]] +# host = "localhost" +# port = 389 +# +# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. +# # note that port will likely need to be changed to 636 for ldaps +# # valid options: "" | "starttls" | "ldaps" +# tls = "" +# +# # skip peer certificate verification. Default is false. +# insecure_skip_verify = false +# +# # Path to PEM-encoded Root certificate to use to verify server certificate +# tls_ca = "/etc/ssl/certs.pem" +# +# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed. +# bind_dn = "" +# bind_password = "" +# +# # reverse metric names so they sort more naturally +# # Defaults to false if unset, but is set to true when generating a new config +# reverse_metric_names = true + + +# # Get standard NTP query metrics from OpenNTPD. +# [[inputs.openntpd]] +# ## Run ntpctl binary with sudo. +# # use_sudo = false +# +# ## Location of the ntpctl binary. +# # binary = "/usr/sbin/ntpctl" +# +# ## Maximum time the ntpctl binary is allowed to run. +# # timeout = "5ms" + + +# # Derive metrics from aggregating OpenSearch query results +# [[inputs.opensearch_query]] +# ## OpenSearch cluster endpoint(s). Multiple urls can be specified as part +# ## of the same cluster. Only one succesful call will be made per interval. +# urls = [ "https://node1.os.example.com:9200" ] # required. +# +# ## OpenSearch client timeout, defaults to "5s". +# # timeout = "5s" +# +# ## HTTP basic authentication details +# # username = "admin" +# # password = "admin" +# +# ## Skip TLS validation. Useful for local testing and self-signed certs. 
+# # insecure_skip_verify = false +# +# [[inputs.opensearch_query.aggregation]] +# ## measurement name for the results of the aggregation query +# measurement_name = "measurement" +# +# ## OpenSearch index or index pattern to search +# index = "index-*" +# +# ## The date/time field in the OpenSearch index (mandatory). +# date_field = "@timestamp" +# +# ## If the field used for the date/time field in OpenSearch is also using +# ## a custom date/time format it may be required to provide the format to +# ## correctly parse the field. +# ## +# ## If using one of the built in OpenSearch formats this is not required. +# ## https://opensearch.org/docs/2.4/opensearch/supported-field-types/date/#built-in-formats +# # date_field_custom_format = "" +# +# ## Time window to query (eg. "1m" to query documents from last minute). +# ## Normally should be set to same as collection interval +# query_period = "1m" +# +# ## Lucene query to filter results +# # filter_query = "*" +# +# ## Fields to aggregate values (must be numeric fields) +# # metric_fields = ["metric"] +# +# ## Aggregation function to use on the metric fields +# ## Must be set if 'metric_fields' is set +# ## Valid values are: avg, sum, min, max, sum +# # metric_function = "avg" +# +# ## Fields to be used as tags. Must be text, non-analyzed fields. Metric +# ## aggregations are performed per tag +# # tags = ["field.keyword", "field2.keyword"] +# +# ## Set to true to not ignore documents when the tag(s) above are missing +# # include_missing_tag = false +# +# ## String value of the tag when the tag does not exist +# ## Required when include_missing_tag is true +# # missing_tag_value = "null" + + +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# [[inputs.opensmtpd]] +# ## If running as a restricted user you can prepend sudo for additional access: +# #use_sudo = false +# +# ## The default location of the smtpctl binary can be overridden with: +# binary = "/usr/sbin/smtpctl" +# +# # The default timeout of 1s can be overridden with: +# #timeout = "1s" + + +# # Read current weather and forecasts data from openweathermap.org +# [[inputs.openweathermap]] +# ## OpenWeatherMap API key. +# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +# +# ## City ID's to collect weather data from. +# city_id = ["5391959"] +# +# ## Language of the description field. Can be one of "ar", "bg", +# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu", +# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru", +# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw" +# # lang = "en" +# +# ## APIs to fetch; can contain "weather" or "forecast". +# fetch = ["weather", "forecast"] +# +# ## OpenWeatherMap base URL +# # base_url = "https://api.openweathermap.org/" +# +# ## Timeout for HTTP response. +# # response_timeout = "5s" +# +# ## Preferred unit system for temperature and wind speed. Can be one of +# ## "metric", "imperial", or "standard". +# # units = "metric" +# +# ## Query interval; OpenWeatherMap weather data is updated every 10 +# ## minutes. +# interval = "10m" + + +# # P4Runtime telemetry input plugin +# [[inputs.p4runtime]] +# ## Define the endpoint of P4Runtime gRPC server to collect metrics. +# # endpoint = "127.0.0.1:9559" +# ## Set DeviceID required for Client Arbitration. +# ## https://p4.org/p4-spec/p4runtime/main/P4Runtime-Spec.html#sec-client-arbitration-and-controller-replication +# # device_id = 1 +# ## Filter counters by their names that should be observed. 
+# ## Example: counter_names_include=["ingressCounter", "egressCounter"] +# # counter_names_include = [] +# +# ## Optional TLS Config. +# ## Enable client-side TLS and define CA to authenticate the device. +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.crt" +# ## Set minimal TLS version to accept by the client. +# # tls_min_version = "TLS12" +# ## Use TLS but skip chain & host verification. +# # insecure_skip_verify = true +# +# ## Define client-side TLS certificate & key to authenticate to the device. +# # tls_cert = "/etc/telegraf/client.crt" +# # tls_key = "/etc/telegraf/client.key" + + +# # Read metrics of passenger using passenger-status +# [[inputs.passenger]] +# ## Path of passenger-status. +# ## +# ## Plugin gather metric via parsing XML output of passenger-status +# ## More information about the tool: +# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html +# ## +# ## If no path is specified, then the plugin simply execute passenger-status +# ## hopefully it can be found in your PATH +# command = "passenger-status -v --show=xml" + + +# # Gather counters from PF +# [[inputs.pf]] +# ## PF require root access on most systems. +# ## Setting 'use_sudo' to true will make use of sudo to run pfctl. +# ## Users must configure sudo to allow telegraf user to run pfctl with no password. +# ## pfctl can be restricted to only list command "pfctl -s info". +# use_sudo = false + + +# # Read metrics of phpfpm, via HTTP status page or socket +# [[inputs.phpfpm]] +# ## An array of addresses to gather stats about. Specify an ip or hostname +# ## with optional port and path +# ## +# ## Plugin can be configured in three modes (either can be used): +# ## - http: the URL must start with http:// or https://, ie: +# ## "http://localhost/status" +# ## "http://192.168.130.1/status?full" +# ## +# ## - unixsocket: path to fpm socket, ie: +# ## "/var/run/php5-fpm.sock" +# ## or using a custom fpm status path: +# ## "/var/run/php5-fpm.sock:fpm-custom-status-path" +# ## glob patterns are also supported: +# ## "/var/run/php*.sock" +# ## +# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: +# ## "fcgi://10.0.0.12:9000/status" +# ## "cgi://10.0.10.12:9001/status" +# ## +# ## Example of multiple gathering from local socket and remote host +# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] +# urls = ["http://localhost/status"] +# +# ## Duration allowed to complete HTTP requests. +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Ping given url(s) and return statistics +# [[inputs.ping]] +# ## Hosts to send ping packets to. +# urls = ["example.org"] +# +# ## Method used for sending pings, can be either "exec" or "native". When set +# ## to "exec" the systems ping command will be executed. When set to "native" +# ## the plugin will send pings directly. +# ## +# ## While the default is "exec" for backwards compatibility, new deployments +# ## are encouraged to use the "native" method for improved compatibility and +# ## performance. +# # method = "exec" +# +# ## Number of ping packets to send per interval. Corresponds to the "-c" +# ## option of the ping command. +# # count = 1 +# +# ## Time to wait between sending ping packets in seconds. Operates like the +# ## "-i" option of the ping command. 
+# # ping_interval = 1.0
+#
+# ## If set, the time to wait for a ping response in seconds. Operates like
+# ## the "-W" option of the ping command.
+# # timeout = 1.0
+#
+# ## If set, the total ping deadline, in seconds. Operates like the -w option
+# ## of the ping command.
+# # deadline = 10
+#
+# ## Interface or source address to send ping from. Operates like the -I or -S
+# ## option of the ping command.
+# # interface = ""
+#
+# ## Percentiles to calculate. This only works with the native method.
+# # percentiles = [50, 95, 99]
+#
+# ## Specify the ping executable binary.
+# # binary = "ping"
+#
+# ## Arguments for the ping command. When arguments is not empty, the command from
+# ## the binary option will be used and other options (ping_interval, timeout,
+# ## etc) will be ignored.
+# # arguments = ["-c", "3"]
+#
+# ## Use only IPv6 addresses when resolving a hostname.
+# # ipv6 = false
+#
+# ## Number of data bytes to be sent. Corresponds to the "-s"
+# ## option of the ping command. This only works with the native method.
+# # size = 56
+
+
+# # Measure postfix queue statistics
+# # This plugin ONLY supports non-Windows
+# [[inputs.postfix]]
+# ## Postfix queue directory. If not provided, telegraf will try to use
+# ## 'postconf -h queue_directory' to determine it.
+# # queue_directory = "/var/spool/postfix"
+
+
+# # Read metrics from one or many PowerDNS servers
+# [[inputs.powerdns]]
+# # An array of sockets to gather stats about.
+# # Specify a path to unix socket.
+# #
+# # If no servers are specified, then '/var/run/pdns.controlsocket' is used as the path.
+# unix_sockets = ["/var/run/pdns.controlsocket"]
+
+
+# # Read metrics from one or many PowerDNS Recursor servers
+# [[inputs.powerdns_recursor]]
+# ## Path to the Recursor control socket.
+# unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
+#
+# ## Directory to create receive socket. This default is likely not writable,
+# ## please reference the full plugin documentation for a recommended setup.
+# # socket_dir = "/var/run/"
+# ## Socket permissions for the receive socket.
+# # socket_mode = "0666"
+#
+# ## The version of the PowerDNS control protocol to use. You will have to
+# ## change this based on your PowerDNS Recursor version, see below:
+# ## Version 1: PowerDNS <4.5.0
+# ## Version 2: PowerDNS 4.5.0 - 4.5.11
+# ## Version 3: PowerDNS >=4.6.0
+# ## By default this is set to 1.
+# # control_protocol_version = 1
+#
+
+
+# # Monitor process cpu and memory usage
+# [[inputs.procstat]]
+# ## PID file to monitor process
+# pid_file = "/var/run/nginx.pid"
+# ## executable name (ie, pgrep <exe>)
+# # exe = "nginx"
+# ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
+# # pattern = "nginx"
+# ## user as argument for pgrep (ie, pgrep -u <user>)
+# # user = "nginx"
+# ## Systemd unit name, supports globs when include_systemd_children is set to true
+# # systemd_unit = "nginx.service"
+# # include_systemd_children = false
+# ## CGroup name or path, supports globs
+# # cgroup = "systemd/system.slice/nginx.service"
+#
+# ## Windows service name
+# # win_service = ""
+#
+# ## override for process_name
+# ## This is optional; default is sourced from /proc/<pid>/status
+# # process_name = "bar"
+#
+# ## Field name prefix
+# # prefix = ""
+#
+# ## When true add the full cmdline as a tag.
+# # cmdline_tag = false
+#
+# ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'.
+# # mode = "irix"
+#
+# ## Add the PID as a tag instead of as a field.
When collecting multiple +# ## processes with otherwise matching tags this setting should be enabled to +# ## ensure each process has a unique identity. +# ## +# ## Enabling this option may result in a large number of series, especially +# ## when processes have a short lifetime. +# # pid_tag = false +# +# ## Method to use when finding process IDs. Can be one of 'pgrep', or +# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while +# ## the native finder performs the search directly in a manor dependent on the +# ## platform. Default is 'pgrep' +# # pid_finder = "pgrep" + + +# # Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2). +# [[inputs.proxmox]] +# ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. +# base_url = "https://localhost:8006/api2/json" +# api_token = "USER@REALM!TOKENID=UUID" +# +# ## Node name, defaults to OS hostname +# ## Unless Telegraf is on the same host as Proxmox, setting this is required +# ## for Telegraf to successfully connect to Proxmox. If not on the same host, +# ## leaving this empty will often lead to a "search domain is not set" error. +# # node_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Reads last_run_summary.yaml file and converts to measurements +# [[inputs.puppetagent]] +# ## Location of puppet last run summary file +# location = "/var/lib/puppet/state/last_run_summary.yaml" + + +# # Reads metrics from RabbitMQ servers via the Management Plugin +# [[inputs.rabbitmq]] +# ## Management Plugin url. (default: http://localhost:15672) +# # url = "http://localhost:15672" +# ## Tag added to rabbitmq_overview series; deprecated: use tags +# # name = "rmq-server-1" +# ## Credentials +# # username = "guest" +# # password = "guest" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional request timeouts +# ## +# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## A list of nodes to gather as the rabbitmq_node measurement. If not +# ## specified, metrics for all nodes are gathered. +# # nodes = ["rabbit@node1", "rabbit@node2"] +# +# ## A list of queues to gather as the rabbitmq_queue measurement. If not +# ## specified, metrics for all queues are gathered. +# ## Deprecated in 1.6: Use queue_name_include instead. +# # queues = ["telegraf"] +# +# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not +# ## specified, metrics for all exchanges are gathered. +# # exchanges = ["telegraf"] +# +# ## Metrics to include and exclude. Globs accepted. 
+# ## Note that an empty array for both will include all metrics +# ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" +# # metric_include = [] +# # metric_exclude = [] +# +# ## Queues to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all queues +# # queue_name_include = [] +# # queue_name_exclude = [] +# +# ## Federation upstreams to include and exclude specified as an array of glob +# ## pattern strings. Federation links can also be limited by the queue and +# ## exchange filters. +# # federation_upstream_include = [] +# # federation_upstream_exclude = [] + + +# [[inputs.radius]] +# ## An array of Server IPs and ports to gather from. If none specified, defaults to localhost. +# servers = ["127.0.0.1:1812","hostname.domain.com:1812"] +# +# ## Credentials for radius authentication. +# username = "myuser" +# password = "mypassword" +# secret = "mysecret" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" + + +# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +# [[inputs.raindrops]] +# ## An array of raindrops middleware URI to gather stats. +# urls = ["http://localhost:8080/_raindrops"] + + +# # Reads metrics from RavenDB servers via the Monitoring Endpoints +# [[inputs.ravendb]] +# ## Node URL and port that RavenDB is listening on. By default, +# ## attempts to connect securely over HTTPS, however, if the user +# ## is running a local unsecure development cluster users can use +# ## HTTP via a URL like "http://localhost:8080" +# url = "https://localhost:4433" +# +# ## RavenDB X509 client certificate setup +# # tls_cert = "/etc/telegraf/raven.crt" +# # tls_key = "/etc/telegraf/raven.key" +# +# ## Optional request timeout +# ## +# ## Timeout, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request and +# ## time limit for requests made by this client +# # timeout = "5s" +# +# ## List of statistics which are collected +# # At least one is required +# # Allowed values: server, databases, indexes, collections +# # +# # stats_include = ["server", "databases", "indexes", "collections"] +# +# ## List of db where database stats are collected +# ## If empty, all db are concerned +# # db_stats_dbs = [] +# +# ## List of db where index status are collected +# ## If empty, all indexes from all db are concerned +# # index_stats_dbs = [] +# +# ## List of db where collection status are collected +# ## If empty, all collections from all db are concerned +# # collection_stats_dbs = [] + + +# # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs +# [[inputs.redfish]] +# ## Redfish API Base URL. +# address = "https://127.0.0.1:5000" +# +# ## Credentials for the Redfish API. +# username = "root" +# password = "password123456" +# +# ## System Id to collect data for in Redfish APIs. +# computer_system_id="System.Embedded.1" +# +# ## Tag sets allow you to include redfish OData link parent data +# ## For Example. +# ## Thermal data is an OData link with parent Chassis which has a link of Location. +# ## For more info see the Redfish Resource and Schema Guide at DMTFs website. 
+# ## Available sets are: "chassis.location" and "chassis" +# # include_tag_sets = ["chassis.location"] +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many redis-sentinel servers +# [[inputs.redis_sentinel]] +# ## specify servers via a url matching: +# ## [protocol://][username:password]@address[:port] +# ## e.g. +# ## tcp://localhost:26379 +# ## tcp://username:password@192.168.99.100 +# ## unix:///var/run/redis-sentinel.sock +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 26379 is used +# # servers = ["tcp://localhost:26379"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Read metrics from one or many RethinkDB servers +# [[inputs.rethinkdb]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port add password. ie, +# ## rethinkdb://user:auth_key@10.10.3.30:28105, +# ## rethinkdb://10.10.3.33:18832, +# ## 10.0.0.1:10000, etc. +# servers = ["127.0.0.1:28015"] +# +# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization, +# ## protocol have to be named "rethinkdb2" - it will use 1_0 H. +# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"] +# +# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol +# ## have to be named "rethinkdb". +# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] + + +# # Read metrics one or many Riak servers +# [[inputs.riak]] +# # Specify a list of one or more riak http servers +# servers = ["http://localhost:8098"] + + +# # Read API usage and limits for a Salesforce organisation +# [[inputs.salesforce]] +# ## specify your credentials +# ## +# username = "your_username" +# password = "your_password" +# ## +# ## (optional) security token +# # security_token = "your_security_token" +# ## +# ## (optional) environment type (sandbox or production) +# ## default is: production +# ## +# # environment = "production" +# ## +# ## (optional) API version (default: "39.0") +# ## +# # version = "39.0" + + +# # Monitor sensors, requires lm-sensors package +# # This plugin ONLY supports Linux +# [[inputs.sensors]] +# ## Remove numbers from field names. +# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. +# # remove_numbers = true +# +# ## Timeout is the maximum amount of time that the sensors command can run. +# # timeout = "5s" + + +# # Get slab statistics from procfs +# # This plugin ONLY supports Linux +# [[inputs.slab]] +# # no configuration - please see the plugin's README for steps to configure +# # sudo properly + + +# # Read metrics from storage devices supporting S.M.A.R.T. +# [[inputs.smart]] +# ## Optionally specify the path to the smartctl executable +# # path_smartctl = "/usr/bin/smartctl" +# +# ## Optionally specify the path to the nvme-cli executable +# # path_nvme = "/usr/bin/nvme" +# +# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case +# ## ["auto-on"] - automatically find and enable additional vendor specific disk info +# ## ["vendor1", "vendor2", ...] 
- e.g. "Intel" enable additional Intel specific disk info
+# # enable_extensions = ["auto-on"]
+#
+# ## On most platforms the used cli utilities require root access.
+# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli.
+# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli
+# ## without a password.
+# # use_sudo = false
+#
+# ## Skip checking disks in this power mode. Defaults to
+# ## "standby" to not wake up disks that have stopped rotating.
+# ## See --nocheck in the man pages for smartctl.
+# ## smartctl version 5.41 and 5.42 have faulty detection of
+# ## power mode and might require changing this value to
+# ## "never" depending on your disks.
+# # nocheck = "standby"
+#
+# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
+# ## information from each drive into the 'smart_attribute' measurement.
+# # attributes = false
+#
+# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed.
+# # excludes = [ "/dev/pass6" ]
+#
+# ## Optionally specify devices and device type, if unset
+# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done
+# ## and all found will be included except for the excluded in excludes.
+# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"]
+#
+# ## Timeout for the cli command to complete.
+# # timeout = "30s"
+#
+# ## Optionally call smartctl and nvme-cli with a specific concurrency policy.
+# ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes.
+# ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of
+# ## SMART data - one individual array drive at a time. In such a case please set this configuration option
+# ## to "sequential" to get readings for all drives.
+# ## valid options: concurrent, sequential
+# # read_method = "concurrent"
+
+
+# # Retrieves SNMP values from remote agents
+# [[inputs.snmp]]
+# ## Agent addresses to retrieve values from.
+# ## format: agents = ["<scheme://><hostname>:<port>"]
+# ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6.
+# ## default is udp
+# ## port: optional
+# ## example: agents = ["udp://127.0.0.1:161"]
+# ## agents = ["tcp://127.0.0.1:161"]
+# ## agents = ["udp4://v4only-snmp-agent"]
+# agents = ["udp://127.0.0.1:161"]
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
+#
+# ## Unconnected UDP socket
+# ## When true, SNMP responses are accepted from any address not just
+# ## the requested address. This can be useful when gathering from
+# ## redundant/failover systems.
+# # unconnected_udp_socket = false
+#
+# ## Path to mib files
+# ## Used by the gosmi translator.
+# ## To add paths when translating with netsnmp, use the MIBDIRS environment variable
+# # path = ["/usr/share/snmp/mibs"]
+#
+# ## SNMP community string.
+# # community = "public"
+#
+# ## Agent host tag
+# # agent_host_tag = "agent_host"
+#
+# ## Number of retries to attempt.
+# # retries = 3
+#
+# ## The GETBULK max-repetitions parameter.
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv" +# ## Context Name. +# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "". +# ### Protocols "AES192", "AES192", "AES256", and "AES256C" require the underlying net-snmp tools +# ### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html) +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" +# +# ## Add fields and tables defining the variables you wish to collect. This +# ## example collects the system uptime and interface variables. Reference the +# ## full plugin documentation for configuration details. +# [[inputs.snmp.field]] +# oid = "RFC1213-MIB::sysUpTime.0" +# name = "uptime" +# +# [[inputs.snmp.field]] +# oid = "RFC1213-MIB::sysName.0" +# name = "source" +# is_tag = true +# +# [[inputs.snmp.table]] +# oid = "IF-MIB::ifTable" +# name = "interface" +# inherit_tags = ["source"] +# +# [[inputs.snmp.table.field]] +# oid = "IF-MIB::ifDescr" +# name = "ifDescr" +# is_tag = true + + +# ## DEPRECATED: The "snmp_legacy" plugin is deprecated in version 1.0.0 and will be removed in 1.30.0, use 'inputs.snmp' instead. +# [[inputs.snmp_legacy]] +# ## Use 'oids.txt' file to translate oids to names +# ## To generate 'oids.txt' you need to run: +# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt +# ## Or if you have an other MIB folder with custom MIBs +# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt +# snmptranslate_file = "/tmp/oids.txt" +# [[inputs.snmp.host]] +# address = "192.168.2.2:161" +# # SNMP community +# community = "public" # default public +# # SNMP version (1, 2 or 3) +# # Version 3 not supported yet +# version = 2 # default 2 +# # SNMP response timeout +# timeout = 2.0 # default 2.0 +# # SNMP request retries +# retries = 2 # default 2 +# # Which get/bulk do you want to collect for this host +# collect = ["mybulk", "sysservices", "sysdescr"] +# # Simple list of OIDs to get, in addition to "collect" +# get_oids = [] +# [[inputs.snmp.host]] +# address = "192.168.2.3:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# collect = ["mybulk"] +# get_oids = [ +# "ifNumber", +# ".1.3.6.1.2.1.1.3.0", +# ] +# [[inputs.snmp.get]] +# name = "ifnumber" +# oid = "ifNumber" +# [[inputs.snmp.get]] +# name = "interface_speed" +# oid = "ifSpeed" +# instance = "0" +# [[inputs.snmp.get]] +# name = "sysuptime" +# oid = ".1.3.6.1.2.1.1.3.0" +# unit = "second" +# [[inputs.snmp.bulk]] +# name = "mybulk" +# max_repetition = 127 +# oid = ".1.3.6.1.2.1.1" +# [[inputs.snmp.bulk]] +# name = "ifoutoctets" +# max_repetition = 127 +# oid = "ifOutOctets" +# [[inputs.snmp.host]] +# address = "192.168.2.13:161" +# #address = "127.0.0.1:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# #collect = ["mybulk", "sysservices", "sysdescr", "systype"] +# collect = ["sysuptime" ] +# [[inputs.snmp.host.table]] +# name = "iftable3" +# include_instances = ["enp5s0", "eth1"] +# # SNMP TABLEs +# # table without mapping neither subtables +# [[inputs.snmp.table]] +# name = "iftable1" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # table without mapping but with subtables +# [[inputs.snmp.table]] +# name = "iftable2" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# sub_tables = [".1.3.6.1.2.1.2.2.1.13"] +# # table with mapping but without subtables +# [[inputs.snmp.table]] +# name = "iftable3" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty. 
get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty, get all subtables +# # table with both mapping and subtables +# [[inputs.snmp.table]] +# name = "iftable4" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty get all subtables +# # sub_tables could be not "real subtables" +# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + + +# # Gather indicators from established connections, using iproute2's ss command. +# # This plugin ONLY supports non-Windows +# [[inputs.socketstat]] +# ## ss can display information about tcp, udp, raw, unix, packet, dccp and sctp sockets +# ## Specify here the types you want to gather +# protocols = [ "tcp", "udp" ] +# +# ## The default timeout of 1s for ss execution can be overridden here: +# # timeout = "1s" + + +# # Gather timeseries from Google Cloud Platform v3 monitoring API +# [[inputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## Include timeseries that start with the given metric type. +# metric_type_prefix_include = [ +# "compute.googleapis.com/", +# ] +# +# ## Exclude timeseries that start with the given metric type. +# # metric_type_prefix_exclude = [] +# +# ## Most metrics are updated no more than once per minute; it is recommended +# ## to override the agent level interval with a value of 1m or greater. +# interval = "1m" +# +# ## Maximum number of API calls to make per second. The quota for accounts +# ## varies, it can be viewed on the API dashboard: +# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits +# # rate_limit = 14 +# +# ## The delay and window options control the number of points selected on +# ## each gather. When set, metrics are gathered between: +# ## start: now() - delay - window +# ## end: now() - delay +# # +# ## Collection delay; if set too low metrics may not yet be available. +# # delay = "5m" +# # +# ## If unset, the window will start at 1m and be updated dynamically to span +# ## the time between calls (approximately the length of the plugin interval). +# # window = "1m" +# +# ## TTL for cached list of metric types. This is the maximum amount of time +# ## it may take to discover new metrics. +# # cache_ttl = "1h" +# +# ## If true, raw bucket counts are collected for distribution value types. +# ## For a more lightweight collection, you may wish to disable and use +# ## distribution_aggregation_aligners instead. +# # gather_raw_distribution_buckets = true +# +# ## Aggregate functions to be used for metrics whose value type is +# ## distribution. These aggregate values are recorded in in addition to raw +# ## bucket counts; if they are enabled. +# ## +# ## For a list of aligner strings see: +# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner +# # distribution_aggregation_aligners = [ +# # "ALIGN_PERCENTILE_99", +# # "ALIGN_PERCENTILE_95", +# # "ALIGN_PERCENTILE_50", +# # ] +# +# ## Filters can be added to reduce the number of time series matched. All +# ## functions are supported: starts_with, ends_with, has_substring, and +# ## one_of. Only the '=' operator is supported. 
+# ##
+# ## The logical operators when combining filters are defined statically using
+# ## the following values:
+# ## filter ::= <resource_labels> {AND <metric_labels> AND <user_labels> AND <system_labels>}
+# ## resource_labels ::= <resource_label> {OR <resource_label>}
+# ## metric_labels ::= <metric_label> {OR <metric_label>}
+# ## user_labels ::= <user_label> {OR <user_label>}
+# ## system_labels ::= <system_label> {OR <system_label>}
+# ##
+# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
+# #
+# ## Resource labels refine the time series selection with the following expression:
+# ## resource.labels.<key> = <value>
+# # [[inputs.stackdriver.filter.resource_labels]]
+# # key = "instance_name"
+# # value = 'starts_with("localhost")'
+# #
+# ## Metric labels refine the time series selection with the following expression:
+# ## metric.labels.<key> = <value>
+# # [[inputs.stackdriver.filter.metric_labels]]
+# # key = "device_name"
+# # value = 'one_of("sda", "sdb")'
+# #
+# ## User labels refine the time series selection with the following expression:
+# ## metadata.user_labels."<key>" = <value>
+# # [[inputs.stackdriver.filter.user_labels]]
+# # key = "environment"
+# # value = 'one_of("prod", "staging")'
+# #
+# ## System labels refine the time series selection with the following expression:
+# ## metadata.system_labels."<key>" = <value>
+# # [[inputs.stackdriver.filter.system_labels]]
+# # key = "machine_type"
+# # value = 'starts_with("e2-")'
+
+
+# # Gathers information about processes running under supervisor using the XML-RPC API
+# [[inputs.supervisor]]
+# ## URL of supervisor's XML-RPC endpoint. If basic auth is enabled in the supervisor http server,
+# ## then you have to add credentials to the url (ex. http://login:pass@localhost:9001/RPC2)
+# # url="http://localhost:9001/RPC2"
+# ## With the settings below you can manage gathering additional information about processes.
+# ## If both of them are empty, then all additional information will be collected.
+# ## Currently supported additional metrics are: pid, rc
+# # metrics_include = []
+
+
+# # Get synproxy counter statistics from procfs
+# # This plugin ONLY supports Linux
+# [[inputs.synproxy]]
+# # no configuration
+
+
+# # Sysstat metrics collector
+# # This plugin ONLY supports Linux
+# [[inputs.sysstat]]
+# ## Path to the sadc command.
+# #
+# ## Common Defaults:
+# ## Debian/Ubuntu: /usr/lib/sysstat/sadc
+# ## Arch: /usr/lib/sa/sadc
+# ## RHEL/CentOS: /usr/lib64/sa/sadc
+# sadc_path = "/usr/lib/sa/sadc" # required
+#
+# ## Path to the sadf command, if it is not in PATH
+# # sadf_path = "/usr/bin/sadf"
+#
+# ## Activities is a list of activities that are passed as arguments to the
+# ## sadc collector utility (e.g: DISK, SNMP etc...)
+# ## The more activities that are added, the more data is collected.
+# # activities = ["DISK"]
+#
+# ## Group metrics to measurements.
+# ##
+# ## If group is false, each metric will be prefixed with a description
+# ## and represents a measurement by itself.
+# ##
+# ## If Group is true, corresponding metrics are grouped to a single measurement.
+# # group = true
+#
+# ## Options for the sadf command. The values on the left represent the sadf options and
+# ## the values on the right their description (which are used for grouping and prefixing metrics).
+# ##
+# ## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version.
+# [inputs.sysstat.options] +# -C = "cpu" +# -B = "paging" +# -b = "io" +# -d = "disk" # requires DISK activity +# "-n ALL" = "network" +# "-P ALL" = "per_cpu" +# -q = "queue" +# -R = "mem" +# -r = "mem_util" +# -S = "swap_util" +# -u = "cpu_util" +# -v = "inode" +# -W = "swap" +# -w = "task" +# # -H = "hugepages" # only available for newer linux distributions +# # "-I ALL" = "interrupts" # requires INT activity +# +# ## Device tags can be used to add additional tags for devices. For example the configuration below +# ## adds a tag vg with value rootvg for all metrics with sda devices. +# # [[inputs.sysstat.device_tags.sda]] +# # vg = "rootvg" + + +# # Gather systemd units state +# [[inputs.systemd_units]] +# ## Set timeout for systemctl execution +# # timeout = "1s" +# # +# ## Filter for a specific unit type, default is "service", other possible +# ## values are "socket", "target", "device", "mount", "automount", "swap", +# ## "timer", "path", "slice" and "scope ": +# # unittype = "service" +# # +# ## Filter for a specific pattern, default is "" (i.e. all), other possible +# ## values are valid pattern for systemctl, e.g. "a*" for all units with +# ## names starting with "a" +# # pattern = "" +# ## pattern = "telegraf* influxdb*" +# ## pattern = "a*" + + +# # Tacacs plugin collects successful tacacs authentication response times. +# [[inputs.tacacs]] +# ## An array of Server IPs (or hostnames) and ports to gather from. If none specified, defaults to localhost. +# # servers = ["127.0.0.1:49"] +# +# ## Request source server IP, normally the server running telegraf. +# # request_ip = "127.0.0.1" +# +# ## Credentials for tacacs authentication. +# username = "myuser" +# password = "mypassword" +# secret = "mysecret" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" + + +# # Reads metrics from a Teamspeak 3 Server via ServerQuery +# [[inputs.teamspeak]] +# ## Server address for Teamspeak 3 ServerQuery +# # server = "127.0.0.1:10011" +# ## Username for ServerQuery +# username = "serverqueryuser" +# ## Password for ServerQuery +# password = "secret" +# ## Nickname of the ServerQuery client +# nickname = "telegraf" +# ## Array of virtual servers +# # virtual_servers = [1] + + +# # Read metrics about temperature +# [[inputs.temp]] +# # no configuration + + +# # Read Tengine's basic status information (ngx_http_reqstat_module) +# [[inputs.tengine]] +# ## An array of Tengine reqstat module URI to gather stats. +# urls = ["http://127.0.0.1/us"] +# +# ## HTTP response timeout (default: 5s) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather metrics from the Tomcat server status page. 
+# [[inputs.tomcat]] +# ## URL of the Tomcat server status +# # url = "http://127.0.0.1:8080/manager/status/all?XML=true" +# +# ## HTTP Basic Auth Credentials +# # username = "tomcat" +# # password = "s3cret" +# +# ## Request timeout +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Inserts sine and cosine waves for demonstration purposes +# [[inputs.trig]] +# ## Set the amplitude +# amplitude = 10.0 + + +# # Read Twemproxy stats data +# [[inputs.twemproxy]] +# ## Twemproxy stats address and port (no scheme) +# addr = "localhost:22222" +# ## Monitor pool name +# pools = ["redis_pool", "mc_pool"] + + +# # A plugin to collect stats from the Unbound DNS resolver +# [[inputs.unbound]] +# ## Address of server to connect to, read from unbound conf default, optionally ':port' +# ## Will lookup IP if given a hostname +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the unbound-control binary can be overridden with: +# # binary = "/usr/sbin/unbound-control" +# +# ## The default location of the unbound config file can be overridden with: +# # config_file = "/etc/unbound/unbound.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" +# +# ## When set to true, thread metrics are tagged with the thread id. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# thread_as_tag = false + + +# # Monitor UPSes connected via Network UPS Tools +# [[inputs.upsd]] +# ## A running NUT server to connect to. +# ## IPv6 addresses must be enclosed in brackets (e.g. "[::1]") +# # server = "127.0.0.1" +# # port = 3493 +# # username = "user" +# # password = "password" +# +# ## Force parsing numbers as floats +# ## It is highly recommended to enable this setting to parse numbers +# ## consistently as floats to avoid database conflicts where some numbers are +# ## parsed as integers and others as floats. +# # force_float = false + + +# # Read uWSGI metrics. +# [[inputs.uwsgi]] +# ## List with urls of uWSGI Stats servers. Url must match pattern: +# ## scheme://address[:port] +# ## +# ## For example: +# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] +# servers = ["tcp://127.0.0.1:1717"] +# +# ## General connection timeout +# # timeout = "5s" + + +# # A plugin to collect stats from Varnish HTTP Cache +# # This plugin ONLY supports non-Windows +# [[inputs.varnish]] +# ## If running as a restricted user you can prepend sudo for additional access: +# #use_sudo = false +# +# ## The default location of the varnishstat binary can be overridden with: +# binary = "/usr/bin/varnishstat" +# +# ## Additional custom arguments for the varnishstat command +# # binary_args = ["-f", "MAIN.*"] +# +# ## The default location of the varnishadm binary can be overridden with: +# adm_binary = "/usr/bin/varnishadm" +# +# ## Custom arguments for the varnishadm command +# # adm_binary_args = [""] +# +# ## Metric version defaults to metric_version=1, use metric_version=2 for removal of nonactive vcls +# ## Varnish 6.0.2 and newer is required for metric_version=2. 
+# metric_version = 1 +# +# ## Additional regexps to override builtin conversion of varnish metrics into telegraf metrics. +# ## Regexp group "_vcl" is used for extracting the VCL name. Metrics that contain nonactive VCL's are skipped. +# ## Regexp group "_field" overrides the field name. Other named regexp groups are used as tags. +# # regexps = ['^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val'] +# +# ## By default, telegraf gather stats for 3 metric points. +# ## Setting stats will override the defaults shown below. +# ## Glob matching can be used, ie, stats = ["MAIN.*"] +# ## stats may also be set to ["*"], which will collect all stats +# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] +# +# ## Optional name for the varnish instance (or working directory) to query +# ## Usually append after -n in varnish cli +# # instance_name = instanceName +# +# ## Timeout for varnishstat command +# # timeout = "1s" + + +# # Read metrics from the Vault API +# [[inputs.vault]] +# ## URL for the Vault agent +# # url = "http://127.0.0.1:8200" +# +# ## Use Vault token for authorization. +# ## Vault token configuration is mandatory. +# ## If both are empty or both are set, an error is thrown. +# # token_file = "/path/to/auth/token" +# ## OR +# token = "s.CDDrgg5zPv5ssI0Z2P4qxJj2" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + +# # Input plugin to counterPath Performance Counters on Windows operating systems +# # This plugin ONLY supports Windows +# [[inputs.win_perf_counters]] +# ## By default this plugin returns basic CPU and Disk statistics. See the +# ## README file for more examples. Uncomment examples below or write your own +# ## as you see fit. If the system being polled for data does not have the +# ## Object at startup of the Telegraf agent, it will not be gathered. +# +# ## Print All matching performance counters +# # PrintValid = false +# +# ## Whether request a timestamp along with the PerfCounter data or use current +# ## time +# # UsePerfCounterTime = true +# +# ## If UseWildcardsExpansion params is set to true, wildcards (partial +# ## wildcards in instance names and wildcards in counters names) in configured +# ## counter paths will be expanded and in case of localized Windows, counter +# ## paths will be also localized. It also returns instance indexes in instance +# ## names. If false, wildcards (not partial) in instance names will still be +# ## expanded, but instance indexes will not be returned in instance names. +# # UseWildcardsExpansion = false +# +# ## When running on a localized version of Windows and with +# ## UseWildcardsExpansion = true, Windows will localize object and counter +# ## names. When LocalizeWildcardsExpansion = false, use the names in +# ## object.Counters instead of the localized names. Only Instances can have +# ## wildcards in this case. ObjectName and Counters must not have wildcards +# ## when this setting is false. +# # LocalizeWildcardsExpansion = true +# +# ## Period after which counters will be reread from configuration and +# ## wildcards in counter paths expanded +# # CountersRefreshInterval="1m" +# +# ## Accepts a list of PDH error codes which are defined in pdh.go, if this +# ## error is encountered it will be ignored. For example, you can provide +# ## "PDH_NO_DATA" to ignore performance counters with no instances. 
By default +# ## no errors are ignored You can find the list here: +# ## https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go +# ## e.g. IgnoredErrors = ["PDH_NO_DATA"] +# # IgnoredErrors = [] +# +# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the +# ## plugin definition, otherwise additional config options are read as part of +# ## the table +# +# # [[inputs.win_perf_counters.object]] +# # Measurement = "" +# # ObjectName = "" +# # Instances = [""] +# # Counters = [] +# ## Additional Object Settings +# ## * IncludeTotal: set to true to include _Total instance when querying +# ## for all metrics via '*' +# ## * WarnOnMissing: print out when the performance counter is missing +# ## from object, counter or instance +# ## * UseRawValues: gather raw values instead of formatted. Raw values are +# ## stored in the field name with the "_Raw" suffix, e.g. +# ## "Disk_Read_Bytes_sec_Raw". +# # IncludeTotal = false +# # WarnOnMissing = false +# # UseRawValues = false +# +# ## Processor usage, alternative to native, reports on a per core. +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_cpu" +# # ObjectName = "Processor" +# # Instances = ["*"] +# # UseRawValues = true +# # Counters = [ +# # "% Idle Time", +# # "% Interrupt Time", +# # "% Privileged Time", +# # "% User Time", +# # "% Processor Time", +# # "% DPC Time", +# # ] +# +# ## Disk times and queues +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_disk" +# # ObjectName = "LogicalDisk" +# # Instances = ["*"] +# # Counters = [ +# # "% Idle Time", +# # "% Disk Time", +# # "% Disk Read Time", +# # "% Disk Write Time", +# # "% User Time", +# # "% Free Space", +# # "Current Disk Queue Length", +# # "Free Megabytes", +# # ] +# +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_diskio" +# # ObjectName = "PhysicalDisk" +# # Instances = ["*"] +# # Counters = [ +# # "Disk Read Bytes/sec", +# # "Disk Write Bytes/sec", +# # "Current Disk Queue Length", +# # "Disk Reads/sec", +# # "Disk Writes/sec", +# # "% Disk Time", +# # "% Disk Read Time", +# # "% Disk Write Time", +# # ] +# +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_net" +# # ObjectName = "Network Interface" +# # Instances = ["*"] +# # Counters = [ +# # "Bytes Received/sec", +# # "Bytes Sent/sec", +# # "Packets Received/sec", +# # "Packets Sent/sec", +# # "Packets Received Discarded", +# # "Packets Outbound Discarded", +# # "Packets Received Errors", +# # "Packets Outbound Errors", +# # ] +# +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_system" +# # ObjectName = "System" +# # Instances = ["------"] +# # Counters = [ +# # "Context Switches/sec", +# # "System Calls/sec", +# # "Processor Queue Length", +# # "System Up Time", +# # ] +# +# ## Example counterPath where the Instance portion must be removed to get +# ## data back, such as from the Memory object. +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_mem" +# # ObjectName = "Memory" +# ## Use 6 x - to remove the Instance bit from the counterPath. 
+# # Instances = ["------"] +# # Counters = [ +# # "Available Bytes", +# # "Cache Faults/sec", +# # "Demand Zero Faults/sec", +# # "Page Faults/sec", +# # "Pages/sec", +# # "Transition Faults/sec", +# # "Pool Nonpaged Bytes", +# # "Pool Paged Bytes", +# # "Standby Cache Reserve Bytes", +# # "Standby Cache Normal Priority Bytes", +# # "Standby Cache Core Bytes", +# # ] +# +# ## Example query where the Instance portion must be removed to get data back, +# ## such as from the Paging File object. +# # [[inputs.win_perf_counters.object]] +# # Measurement = "win_swap" +# # ObjectName = "Paging File" +# # Instances = ["_Total"] +# # Counters = [ +# # "% Usage", +# # ] + + +# # Input plugin to report Windows services info. +# # This plugin ONLY supports Windows +# [[inputs.win_services]] +# ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. Case sensitive. +# service_names = [ +# "LanmanServer", +# "TermService", +# "Win*", +# ] +# excluded_service_names = ['WinRM'] # optional, list of service names to exclude + + +# # Input plugin to query Windows Management Instrumentation +# # This plugin ONLY supports Windows +# [[inputs.win_wmi]] +# [[inputs.win_wmi.query]] +# # a string representing the WMI namespace to be queried +# namespace = "root\\cimv2" +# # a string representing the WMI class to be queried +# class_name = "Win32_Volume" +# # an array of strings representing the properties of the WMI class to be queried +# properties = ["Name", "Capacity", "FreeSpace"] +# # a string specifying a WHERE clause to use as a filter for the WQL +# filter = 'NOT Name LIKE "\\\\?\\%"' +# # WMI class properties which should be considered tags instead of fields +# tag_properties = ["Name"] + + +# # Collect Wireguard server interface and peer statistics +# [[inputs.wireguard]] +# ## Optional list of Wireguard device/interface names to query. +# ## If omitted, all Wireguard interfaces are queried. +# # devices = ["wg0"] + + +# # Monitor wifi signal strength and quality +# # This plugin ONLY supports Linux +# [[inputs.wireless]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" + + +# # Reads metrics from a SSL certificate +# [[inputs.x509_cert]] +# ## List certificate sources, support wildcard expands for files +# ## Prefix your entry with 'file://' if you intend to use relative paths +# sources = ["tcp://example.org:443", "https://influxdata.com:443", +# "smtp://mail.localhost:25", "udp://127.0.0.1:4433", +# "/etc/ssl/certs/ssl-cert-snakeoil.pem", +# "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] +# +# ## Timeout for SSL connection +# # timeout = "5s" +# +# ## Pass a different name into the TLS request (Server Name Indication). +# ## This is synonymous with tls_server_name, and only one of the two +# ## options may be specified at one time. +# ## example: server_name = "myhost.example.org" +# # server_name = "myhost.example.org" +# +# ## Only output the leaf certificates and omit the root ones. 
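+# ## example: exclude_root_certs = true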
+# # exclude_root_certs = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# # tls_server_name = "myhost.example.org" +# +# ## Set the proxy URL +# # use_proxy = true +# # proxy_url = "http://localhost:8888" + + +# # Gathers Metrics From a Dell EMC XtremIO Storage Array's V3 API +# [[inputs.xtremio]] +# ## XtremIO User Interface Endpoint +# url = "https://xtremio.example.com/" # required +# +# ## Credentials +# username = "user1" +# password = "pass123" +# +# ## Metrics to collect from the XtremIO +# # collectors = ["bbus","clusters","ssds","volumes","xms"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets +# # This plugin ONLY supports Linux & FreeBSD +# [[inputs.zfs]] +# ## ZFS kstat path. Ignored on FreeBSD +# ## If not specified, then default is: +# # kstatPath = "/proc/spl/kstat/zfs" +# +# ## By default, telegraf gather all zfs stats +# ## Override the stats list using the kstatMetrics array: +# ## For FreeBSD, the default is: +# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# ## For Linux, the default is: +# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# +# ## By default, don't gather zpool stats +# # poolMetrics = false +# +# ## By default, don't gather dataset stats +# # datasetMetrics = false + + +# # Reads 'mntr' stats from one or many zookeeper servers +# [[inputs.zookeeper]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie localhost:2181, 10.0.0.1:2181, etc. +# +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 2181 is used +# servers = [":2181"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" +# +# ## Float Parsing - the initial implementation forced any value unable to be +# ## parsed as an int to be a string. Setting this to "float" will attempt to +# ## parse float values as floats and not strings. This would break existing +# ## metrics and may cause issues if a value switches between a float and int. 
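+# ## example: parse_floats = "float" to attempt parsing values as floats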
+# # parse_floats = "string" +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true + + +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### + + +# # Pull Metric Statistics from Aliyun CMS +# [[inputs.aliyuncms]] +# ## Aliyun Credentials +# ## Credentials are loaded in the following order +# ## 1) Ram RoleArn credential +# ## 2) AccessKey STS token credential +# ## 3) AccessKey credential +# ## 4) Ecs Ram Role credential +# ## 5) RSA keypair credential +# ## 6) Environment variables credential +# ## 7) Instance metadata credential +# +# # access_key_id = "" +# # access_key_secret = "" +# # access_key_sts_token = "" +# # role_arn = "" +# # role_session_name = "" +# # private_key = "" +# # public_key_id = "" +# # role_name = "" +# +# ## Specify ali cloud regions to be queried for metric and object discovery +# ## If not set, all supported regions (see below) would be covered, it can +# ## provide a significant load on API, so the recommendation here is to +# ## limit the list as much as possible. +# ## Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm +# ## Default supported regions are: +# ## cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou, +# ## cn-shanghai, cn-shenzhen, cn-heyuan,cn-chengdu,cn-hongkong, +# ## ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, +# ## ap-south-1,ap-northeast-1, us-west-1,us-east-1,eu-central-1, +# ## eu-west-1,me-east-1 +# ## +# ## From discovery perspective it set the scope for object discovery, +# ## the discovered info can be used to enrich the metrics with objects +# ## attributes/tags. Discovery is not supported for all projects. +# ## Currently, discovery supported for the following projects: +# ## - acs_ecs_dashboard +# ## - acs_rds_dashboard +# ## - acs_slb_dashboard +# ## - acs_vpc_eip +# regions = ["cn-hongkong"] +# +# ## Requested AliyunCMS aggregation Period (required) +# ## The period must be multiples of 60s and the minimum for AliyunCMS metrics +# ## is 1 minute (60s). However not all metrics are made available to the +# ## one minute period. Some are collected at 3 minute, 5 minute, or larger +# ## intervals. +# ## See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv +# ## Note that if a period is configured that is smaller than the minimum for +# ## a particular metric, that metric will not be returned by Aliyun's +# ## OpenAPI and will not be collected by Telegraf. +# period = "5m" +# +# ## Collection Delay (required) +# ## The delay must account for metrics availability via AliyunCMS API. 
+# delay = "1m"
+#
+# ## Recommended: use metric 'interval' that is a multiple of 'period'
+# ## to avoid gaps or overlap in pulled data
+# interval = "5m"
+#
+# ## Metric Statistic Project (required)
+# project = "acs_slb_dashboard"
+#
+# ## Maximum requests per second, default value is 200
+# ratelimit = 200
+#
+# ## How often the discovery API call is executed (default 1m)
+# #discovery_interval = "1m"
+#
+# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
+# ## plugin definition, otherwise additional config options are read as part of
+# ## the table
+#
+# ## Metrics to Pull
+# ## At least one metrics definition required
+# [[inputs.aliyuncms.metrics]]
+# ## Metric names to be requested.
+# ## Description can be found here (per project):
+# ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+# names = ["InstanceActiveConnection", "InstanceNewConnection"]
+#
+# ## Dimension filters for Metric (optional)
+# ## This allows to get additional metric dimension. If dimension is not
+# ## specified it can be returned or the data can be aggregated - it depends
+# ## on particular metric, you can find details here:
+# ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+# ##
+# ## Note, that by default dimension filter includes the list of discovered
+# ## objects in scope (if discovery is enabled). Values specified here would
+# ## be added into the list of discovered objects. You can specify either
+# ## single dimension:
+# # dimensions = '{"instanceId": "p-example"}'
+#
+# ## Or you can specify several dimensions at once:
+# # dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
+#
+# ## Tag Query Path
+# ## The following tags are added by default:
+# ## * regionId (if discovery enabled)
+# ## * userId
+# ## * instanceId
+# ## Enrichment tags, can be added from discovery (if supported)
+# ## Notation is <measurement_tag_name>:<JMES query path (https://jmespath.org/)>
+# ## To figure out which fields are available, consult the
+# ## Describe<ObjectType> API per project. For example, for SLB see:
+# ## https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
+# # tag_query_path = [
+# # "address:Address",
+# # "name:LoadBalancerName",
+# # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
+# # ]
+#
+# ## Allow metrics without discovery data, if discovery is enabled.
+# ## If set to true, then metric without discovery data would be emitted, otherwise dropped.
+# ## This can be of help when debugging dimension filters, or partial coverage of
+# ## discovery scope vs monitoring scope
+# # allow_dps_without_discovery = false
+
+
+# # AMQP consumer plugin
+# [[inputs.amqp_consumer]]
+# ## Brokers to consume from. If multiple brokers are specified a random broker
+# ## will be selected anytime a connection is established. This can be
+# ## helpful for load balancing when not using a dedicated load balancer.
+# brokers = ["amqp://localhost:5672/influxdb"]
+#
+# ## Authentication credentials for the PLAIN auth_method.
+# # username = ""
+# # password = ""
+#
+# ## Name of the exchange to declare. If unset, no exchange will be declared.
+# exchange = "telegraf"
+#
+# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
+# # exchange_type = "topic"
+#
+# ## If true, exchange will be passively declared.
+# # exchange_passive = false
+#
+# ## Exchange durability can be either "transient" or "durable".
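+# ## example: exchange_durability = "transient" for an exchange that does not
+# ## need to survive a broker restart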
+# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## AMQP queue name. +# queue = "telegraf" +# +# ## AMQP queue durability can be "transient" or "durable". +# queue_durability = "durable" +# +# ## If true, queue will be passively declared. +# # queue_passive = false +# +# ## Additional arguments when consuming from Queue +# # queue_consume_arguments = { } +# # queue_consume_arguments = {"x-stream-offset" = "first"} +# +# ## A binding between the exchange and queue using this binding key is +# ## created. If unset, no binding is created. +# binding_key = "#" +# +# ## Maximum number of messages server should give to the worker. +# # prefetch_count = 50 +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Content encoding for message payloads, can be set to +# ## "gzip", "identity" or "auto" +# ## - Use "gzip" to decode gzip +# ## - Use "identity" to apply no encoding +# ## - Use "auto" determine the encoding using the ContentEncoding header +# # content_encoding = "identity" +# +# ## Maximum size of decoded message. +# ## Acceptable units are B, KiB, KB, MiB, MB... +# ## Without quotes and units, interpreted as size in bytes. +# # max_decompression_size = "500MB" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# ## DEPRECATED: The "cassandra" plugin is deprecated in version 1.7.0 and will be removed in 1.30.0, use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead. +# # Read Cassandra metrics through Jolokia +# [[inputs.cassandra]] +# context = "/jolokia/read" +# ## List of cassandra servers exposing jolokia read service +# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] +# ## List of metrics collected on above servers +# ## Each metric consists of a jmx path. +# ## This will collect all heap memory usage metrics from the jvm and +# ## ReadLatency metrics for all keyspaces and tables. +# ## "type=Table" in the query works with Cassandra3.0. 
Older versions might +# ## need to use "type=ColumnFamily" +# metrics = [ +# "/java.lang:type=Memory/HeapMemoryUsage", +# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" +# ] + + +# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms +# [[inputs.cisco_telemetry_mdt]] +# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when +# ## using the grpc transport. +# transport = "grpc" +# +# ## Address and port to host telemetry listener +# service_address = ":57000" +# +# ## Grpc Maximum Message Size, default is 4MB, increase the size. This is +# ## stored as a uint32, and limited to 4294967295. +# max_msg_size = 4000000 +# +# ## Enable TLS; grpc transport only. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Enable TLS client authentication and define allowed CA certificates; grpc +# ## transport only. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags +# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] +# +# ## Include the delete field in every telemetry message. +# # include_delete_field = false +# +# ## Specify custom name for incoming MDT source field. +# # source_field_name = "mdt_source" +# +# ## Define aliases to map telemetry encoding paths to simple measurement names +# [inputs.cisco_telemetry_mdt.aliases] +# ifstats = "ietf-interfaces:interfaces-state/interface/statistics" +# ## Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. +# [inputs.cisco_telemetry_mdt.dmes] +# # Global Property Xformation. +# # prop1 = "uint64 to int" +# # prop2 = "uint64 to string" +# # prop3 = "string to uint64" +# # prop4 = "string to int64" +# # prop5 = "string to float64" +# # auto-prop-xfrom = "auto-float-xfrom" #Xform any property which is string, and has float number to type float64 +# # Per Path property xformation, Name is telemetry configuration under sensor-group, path configuration "WORD Distinguished Name" +# # Per Path configuration is better as it avoid property collision issue of types. +# # dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}' +# # dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}' +# # dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' +# +# ## Additional GRPC connection settings. +# [inputs.cisco_telemetry_mdt.grpc_enforcement_policy] +# ## GRPC permit keepalives without calls, set to true if your clients are +# ## sending pings without calls in-flight. This can sometimes happen on IOS-XE +# ## devices where the GRPC connection is left open but subscriptions have been +# ## removed, and adding subsequent subscriptions does not keep a stable session. +# # permit_keepalive_without_calls = false +# +# ## GRPC minimum timeout between successive pings, decreasing this value may +# ## help if this plugin is closing connections with ENHANCE_YOUR_CALM (too_many_pings). 
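+# ## example: keepalive_minimum_time = "30s" for clients that ping frequently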
+# # keepalive_minimum_time = "5m"
+
+
+# # Read metrics from one or many ClickHouse servers
+# [[inputs.clickhouse]]
+# ## Username for authorization on ClickHouse server
+# username = "default"
+#
+# ## Password for authorization on ClickHouse server
+# # password = ""
+#
+# ## HTTP(s) timeout while getting metrics values
+# ## The timeout includes connection time, any redirects, and reading the
+# ## response body.
+# # timeout = 5s
+#
+# ## List of servers for metrics scraping
+# ## metrics scrape via HTTP(s) clickhouse interface
+# ## https://clickhouse.tech/docs/en/interfaces/http/
+# servers = ["http://127.0.0.1:8123"]
+#
+# ## If "auto_discovery" is "true" plugin tries to connect to all servers
+# ## available in the cluster using the same "user:password" described in
+# ## "user" and "password" parameters and get this server hostname list from
+# ## "system.clusters" table. See
+# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+# # auto_discovery = true
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter is present then "WHERE cluster IN (...)" filter will apply
+# ## please use only full cluster names here, regexp and glob filters are not
+# ## allowed for "/etc/clickhouse-server/config.d/remote.xml"
+# ## <remote_servers>
+# ##   <my-own-cluster>
+# ##     <shard>
+# ##       <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
+# ##       <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
+# ##     </shard>
+# ##     <shard>
+# ##       <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
+# ##       <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
+# ##     </shard>
+# ##   </my-own-cluster>
+# ## </remote_servers>
+# ##
+# ## example: cluster_include = ["my-own-cluster"]
+# # cluster_include = []
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is
+# ## "true" when this filter is present then "WHERE cluster NOT IN (...)"
+# ## filter will apply
+# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+# # cluster_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from Google PubSub
+# [[inputs.cloud_pubsub]]
+# ## Required. Name of Google Cloud Platform (GCP) Project that owns
+# ## the given PubSub subscription.
+# project = "my-project"
+#
+# ## Required. Name of PubSub subscription to ingest metrics from.
+# subscription = "my-subscription"
+#
+# ## Required. Data format to consume.
+# ## Each data format has its own unique set of configuration options.
+# ## Read more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
+# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
+# ## Application Default Credentials, which is preferred.
+# # credentials_file = "path/to/my/creds.json"
+#
+# ## Optional. Number of seconds to wait before attempting to restart the
+# ## PubSub subscription receiver after an unexpected error.
+# ## If the streaming pull for a PubSub Subscription fails (receiver),
+# ## the agent attempts to restart receiving messages after this many seconds.
+# # retry_delay_seconds = 5
+#
+# ## Optional.
Maximum byte length of a message to consume. +# ## Larger messages are dropped with an error. If less than 0 or unspecified, +# ## treated as no limit. +# # max_message_len = 1000000 +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## The following are optional Subscription ReceiveSettings in PubSub. +# ## Read more about these values: +# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings +# +# ## Optional. Maximum number of seconds for which a PubSub subscription +# ## should auto-extend the PubSub ACK deadline for each message. If less than +# ## 0, auto-extension is disabled. +# # max_extension = 0 +# +# ## Optional. Maximum number of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_messages = 0 +# +# ## Optional. Maximum size in bytes of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_bytes = 0 +# +# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn +# ## to pull messages from PubSub concurrently. This limit applies to each +# ## subscription separately and is treated as the PubSub default if less than +# ## 1. Note this setting does not limit the number of messages that can be +# ## processed concurrently (use "max_outstanding_messages" instead). +# # max_receiver_go_routines = 0 +# +# ## Optional. If true, Telegraf will attempt to base64 decode the +# ## PubSub message data before parsing. Many GCP services that +# ## output JSON to Google PubSub base64-encode the JSON payload. +# # base64_data = false +# +# ## Content encoding for message payloads, can be set to "gzip" or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## If content encoding is not "identity", sets the maximum allowed size, +# ## in bytes, for a message payload when it's decompressed. Can be increased +# ## for larger payloads or reduced to protect against decompression bombs. +# ## Acceptable units are B, KiB, KB, MiB, MB... +# # max_decompression_size = "500MB" + + +# # Google Cloud Pub/Sub Push HTTP listener +# [[inputs.cloud_pubsub_push]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Application secret to verify messages originate from Cloud Pub/Sub +# # token = "" +# +# ## Path to listen to. +# # path = "/" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response. This should be +# ## set to a value large enough that you can send at least 'metric_batch_size' +# ## number of messages within the duration. 
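+# ## example: write_timeout = "30s" when a large metric_batch_size needs more
+# ## time to be sent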
+# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Whether to add the pubsub metadata, such as message attributes and +# ## subscription as a tag. +# # add_meta = false +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # AWS Metric Streams listener +# [[inputs.cloudwatch_metric_streams]] +# ## Address and port to host HTTP listener on +# service_address = ":443" +# +# ## Paths to listen to. +# # paths = ["/telegraf"] +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Optional access key for Firehose security. +# # access_key = "test-key" +# +# ## An optional flag to keep Metric Streams metrics compatible with +# ## CloudWatch's API naming +# # api_compatability = false +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # A ctrlX Data Layer server sent event input plugin +# [[inputs.ctrlx_datalayer]] +# ## Hostname or IP address of the ctrlX CORE Data Layer server +# ## example: server = "localhost" # Telegraf is running directly on the device +# ## server = "192.168.1.1" # Connect to ctrlX CORE remote via IP +# ## server = "host.example.com" # Connect to ctrlX CORE remote via hostname +# ## server = "10.0.2.2:8443" # Connect to ctrlX CORE Virtual from development environment +# server = "localhost" +# +# ## Authentication credentials +# username = "boschrexroth" +# password = "boschrexroth" +# +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Timeout for HTTP requests. (default: "10s") +# # timeout = "10s" +# +# +# ## Create a ctrlX Data Layer subscription. +# ## It is possible to define multiple subscriptions per host. Each subscription can have its own +# ## sampling properties and a list of nodes to subscribe to. 
+# ## All subscriptions share the same credentials. +# [[inputs.ctrlx_datalayer.subscription]] +# ## The name of the measurement. (default: "ctrlx") +# measurement = "memory" +# +# ## Configure the ctrlX Data Layer nodes which should be subscribed. +# ## address - node address in ctrlX Data Layer (mandatory) +# ## name - field name to use in the output (optional, default: base name of address) +# ## tags - extra node tags to be added to the output metric (optional) +# ## Note: +# ## Use either the inline notation or the bracketed notation, not both. +# ## The tags property is only supported in bracketed notation due to toml parser restrictions +# ## Examples: +# ## Inline notation +# nodes=[ +# {name="available", address="framework/metrics/system/memavailable-mb"}, +# {name="used", address="framework/metrics/system/memused-mb"}, +# ] +# ## Bracketed notation +# # [[inputs.ctrlx_datalayer.subscription.nodes]] +# # name ="available" +# # address="framework/metrics/system/memavailable-mb" +# # ## Define extra tags related to node to be added to the output metric (optional) +# # [inputs.ctrlx_datalayer.subscription.nodes.tags] +# # node_tag1="node_tag1" +# # node_tag2="node_tag2" +# # [[inputs.ctrlx_datalayer.subscription.nodes]] +# # name ="used" +# # address="framework/metrics/system/memused-mb" +# +# ## The switch "output_json_string" enables output of the measurement as json. +# ## That way it can be used in in a subsequent processor plugin, e.g. "Starlark Processor Plugin". +# # output_json_string = false +# +# ## Define extra tags related to subscription to be added to the output metric (optional) +# # [inputs.ctrlx_datalayer.subscription.tags] +# # subscription_tag1 = "subscription_tag1" +# # subscription_tag2 = "subscription_tag2" +# +# ## The interval in which messages shall be sent by the ctrlX Data Layer to this plugin. (default: 1s) +# ## Higher values reduce load on network by queuing samples on server side and sending as a single TCP packet. +# # publish_interval = "1s" +# +# ## The interval a "keepalive" message is sent if no change of data occurs. (default: 60s) +# ## Only used internally to detect broken network connections. +# # keep_alive_interval = "60s" +# +# ## The interval an "error" message is sent if an error was received from a node. (default: 10s) +# ## Higher values reduce load on output target and network in case of errors by limiting frequency of error messages. +# # error_interval = "10s" +# +# ## The interval that defines the fastest rate at which the node values should be sampled and values captured. (default: 1s) +# ## The sampling frequency should be adjusted to the dynamics of the signal to be sampled. +# ## Higher sampling frequence increases load on ctrlX Data Layer. +# ## The sampling frequency can be higher, than the publish interval. Captured samples are put in a queue and sent in publish interval. +# ## Note: The minimum sampling interval can be overruled by a global setting in the ctrlX Data Layer configuration ('datalayer/subscriptions/settings'). +# # sampling_interval = "1s" +# +# ## The requested size of the node value queue. (default: 10) +# ## Relevant if more values are captured than can be sent. +# # queue_size = 10 +# +# ## The behaviour of the queue if it is full. (default: "DiscardOldest") +# ## Possible values: +# ## - "DiscardOldest" +# ## The oldest value gets deleted from the queue when it is full. +# ## - "DiscardNewest" +# ## The newest value gets deleted from the queue when it is full. 
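+# ## example: queue_behaviour = "DiscardNewest" to keep the oldest queued samples instead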
+# # queue_behaviour = "DiscardOldest" +# +# ## The filter when a new value will be sampled. (default: 0.0) +# ## Calculation rule: If (abs(lastCapturedValue - newValue) > dead_band_value) capture(newValue). +# # dead_band_value = 0.0 +# +# ## The conditions on which a sample should be captured and thus will be sent as a message. (default: "StatusValue") +# ## Possible values: +# ## - "Status" +# ## Capture the value only, when the state of the node changes from or to error state. Value changes are ignored. +# ## - "StatusValue" +# ## Capture when the value changes or the node changes from or to error state. +# ## See also 'dead_band_value' for what is considered as a value change. +# ## - "StatusValueTimestamp": +# ## Capture even if the value is the same, but the timestamp of the value is newer. +# ## Note: This might lead to high load on the network because every sample will be sent as a message +# ## even if the value of the node did not change. +# # value_change = "StatusValue" +# + + +# # Ingests files in a directory and then moves them to a target directory. +# [[inputs.directory_monitor]] +# ## The directory to monitor and read files from (including sub-directories if "recursive" is true). +# directory = "" +# # +# ## The directory to move finished files to (maintaining directory hierachy from source). +# finished_directory = "" +# # +# ## Setting recursive to true will make the plugin recursively walk the directory and process all sub-directories. +# # recursive = false +# # +# ## The directory to move files to upon file error. +# ## If not provided, erroring files will stay in the monitored directory. +# # error_directory = "" +# # +# ## The amount of time a file is allowed to sit in the directory before it is picked up. +# ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow, +# ## set this higher so that the plugin will wait until the file is fully copied to the directory. +# # directory_duration_threshold = "50ms" +# # +# ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested. +# # files_to_monitor = ["^.*\.csv"] +# # +# ## A list of files to ignore, if necessary. Supports regex. +# # files_to_ignore = [".DS_Store"] +# # +# ## Maximum lines of the file to process that have not yet be written by the +# ## output. For best throughput set to the size of the output's metric_buffer_limit. +# ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics. +# # max_buffered_metrics = 10000 +# # +# ## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files. +# ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. +# # file_queue_size = 100000 +# # +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. Cautious when file name variation is high, this can increase the cardinality +# ## significantly. Read more about cardinality here: +# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality +# # file_tag = "" +# # +# ## Specify if the file can be read completely at once or if it needs to be read line by line (default). +# ## Possible values: "line-by-line", "at-once" +# # parse_method = "line-by-line" +# # +# ## The dataformat to be read from the files. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read logging output from the Docker engine +# [[inputs.docker_log]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# # endpoint = "unix:///var/run/docker.sock" +# +# ## When true, container logs are read from the beginning; otherwise reading +# ## begins at the end of the log. If state-persistence is enabled for Telegraf, +# ## the reading continues at the last previously processed timestamp. +# # from_beginning = false +# +# ## Timeout for Docker API calls. +# # timeout = "5s" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# # docker_label_include = [] +# # docker_label_exclude = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Azure Event Hubs service input plugin +# [[inputs.eventhub_consumer]] +# ## The default behavior is to create a new Event Hub client from environment variables. +# ## This requires one of the following sets of environment variables to be set: +# ## +# ## 1) Expected Environment Variables: +# ## - "EVENTHUB_CONNECTION_STRING" +# ## +# ## 2) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "EVENTHUB_KEY_NAME" +# ## - "EVENTHUB_KEY_VALUE" +# +# ## 3) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "AZURE_TENANT_ID" +# ## - "AZURE_CLIENT_ID" +# ## - "AZURE_CLIENT_SECRET" +# +# ## Uncommenting the option below will create an Event Hub client based solely on the connection string. +# ## This can either be the associated environment variable or hard coded directly. +# ## If this option is uncommented, environment variables will be ignored. +# ## Connection string should contain EventHubName (EntityPath) +# # connection_string = "" +# +# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister +# # persistence_dir = "" +# +# ## Change the default consumer group +# # consumer_group = "" +# +# ## By default the event hub receives all messages present on the broker, alternative modes can be set below. +# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). +# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). 
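+# ## example: from_timestamp = 2023-01-01T00:00:00Z (RFC 3339 offset date-time)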
+# # from_timestamp = +# # latest = true +# +# ## Set a custom prefetch count for the receiver(s) +# # prefetch_count = 1000 +# +# ## Add an epoch to the receiver(s) +# # epoch = 0 +# +# ## Change to set a custom user agent, "telegraf" is used by default +# # user_agent = "telegraf" +# +# ## To consume from a specific partition, set the partition_ids option. +# ## An empty array will result in receiving from all partitions. +# # partition_ids = ["0","1"] +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Set either option below to true to use a system property as timestamp. +# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. +# ## It is recommended to use this setting when the data itself has no timestamp. +# # enqueued_time_as_ts = true +# # iot_hub_enqueued_time_as_ts = true +# +# ## Tags or fields to create from keys present in the application property bag. +# ## These could for example be set by message enrichments in Azure IoT Hub. +# # application_property_tags = [] +# # application_property_fields = [] +# +# ## Tag or field name to use for metadata +# ## By default all metadata is disabled +# # sequence_number_field = "SequenceNumber" +# # enqueued_time_field = "EnqueuedTime" +# # offset_field = "Offset" +# # partition_id_tag = "PartitionID" +# # partition_key_tag = "PartitionKey" +# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" +# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" +# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" +# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" +# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Run executable as long-running input plugin +# [[inputs.execd]] +# ## One program to run as daemon. +# ## NOTE: process and each argument should each be their own string +# command = ["telegraf-smartctl", "-d", "/dev/sda"] +# +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# +# ## Define how the process is signaled on each collection interval. +# ## Valid values are: +# ## "none" : Do not signal anything. (Recommended for service inputs) +# ## The process must output metrics by itself. +# ## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs) +# ## "SIGHUP" : Send a HUP signal. Not available on Windows. (not recommended) +# ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. +# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. 
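+# ## example: signal = "STDIN" for a gather-style executable that emits metrics
+# ## each time it is signalled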
+# signal = "none" +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Buffer size used to read from the command output stream +# ## Optional parameter. Default is 64 Kib, minimum is 16 bytes +# # buffer_size = "64Kib" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # gNMI telemetry input plugin +# [[inputs.gnmi]] +# ## Address and port of the gNMI GRPC server +# addresses = ["10.49.234.114:57777"] +# +# ## define credentials +# username = "cisco" +# password = "cisco" +# +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") +# # encoding = "proto" +# +# ## redial in case of failures after +# # redial = "10s" +# +# ## gRPC Maximum Message Size +# # max_msg_size = "4MB" +# +# ## Enable to get the canonical path as field-name +# # canonical_field_names = false +# +# ## Remove leading slashes and dots in field-name +# # trim_field_names = false +# +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# ## Minimal TLS version to accept by the client +# # tls_min_version = "TLS12" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true +# +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## gNMI subscription prefix (optional, can usually be left empty) +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# # origin = "" +# # prefix = "" +# # target = "" +# +# ## Vendor specific options +# ## This defines what vendor specific options to load. +# ## * Juniper Header Extension (juniper_header): some sensors are directly managed by +# ## Linecard, which adds the Juniper GNMI Header Extension. Enabling this +# ## allows the decoding of the Extension header if present. Currently this knob +# ## adds component, component_id & sub_component_id as additionnal tags +# # vendor_specific = [] +# +# ## Define additional aliases to map encoding paths to measurement names +# # [inputs.gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" +# +# [[inputs.gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" +# +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructure inside it that should be subscribed +# ## to (similar to an XPath). YANG models can be found e.g. here: +# ## https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# ## Subscription mode ("target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" +# +# ## Tag subscriptions are applied as tags to other subscriptions. 
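+# ## e.g. the commented block below turns interface state (such as the description)
+# ## into tags on the matching "ifcounters" metrics above.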
+# # [[inputs.gnmi.tag_subscription]]
+# # ## When applying this value as a tag to other metrics, use this tag name
+# # name = "descr"
+# #
+# # ## All other subscription fields are as normal
+# # origin = "openconfig-interfaces"
+# # path = "/interfaces/interface/state"
+# # subscription_mode = "on_change"
+# #
+# # ## Match strategy to use for the tag.
+# # ## Tags are only applied for metrics of the same address. The following
+# # ## settings are valid:
+# # ## unconditional -- always match
+# # ## name -- match by the "name" key
+# # ## This resembles the previous 'tag-only' behavior.
+# # ## elements -- match by the keys in the path filtered by the path
+# # ## parts specified `elements` below
+# # ## By default, 'elements' is used if the 'elements' option is provided,
+# # ## otherwise match by 'name'.
+# # # match = ""
+# #
+# # ## For the 'elements' match strategy, at least one path-element name must
+# # ## be supplied containing at least one key to match on. Multiple path
+# # ## elements can be specified in any order. All given keys must be equal
+# # ## for a match.
+# # # elements = ["description", "interface"]
+
+
+# ## DEPRECATED: The "http_listener" plugin is deprecated in version 1.9.0, has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead.
+# # Accept metrics over InfluxDB 1.x HTTP API
+# [[inputs.influxdb_listener]]
+# ## Address and port to host HTTP listener on
+# service_address = ":8186"
+#
+# ## maximum duration before timing out read of the request
+# read_timeout = "10s"
+# ## maximum duration before timing out write of the response
+# write_timeout = "10s"
+#
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# max_body_size = 0
+#
+# ## Maximum line size allowed to be sent in bytes.
+# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored
+# # max_line_size = 0
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# tls_cert = "/etc/telegraf/cert.pem"
+# tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional tag name used to store the database name.
+# ## If the write has a database in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# ## If you have a tag that is the same as the one specified below, and supply a database,
+# ## the tag will be overwritten with the database supplied.
+# # database_tag = ""
+#
+# ## If set the retention policy specified in the write query will be added as
+# ## the value of this tag name.
+# # retention_policy_tag = ""
+#
+# ## Optional username and password to accept for HTTP basic authentication
+# ## or authentication token.
+# ## You probably want to make sure you have TLS configured above for this.
+# ## Use these options for the authentication token in the form
+# ## Authentication: Token <username>:<token>
+# # basic_username = "foobar"
+# # basic_password = "barfoo"
+#
+# ## Optional JWT token authentication for HTTP requests
+# ## Please see the documentation at
+# ## https://docs.influxdata.com/influxdb/v1.8/administration/authentication_and_authorization/#authenticate-using-jwt-tokens
+# ## for further details.
+# ## Please note: Token authentication and basic authentication cannot be used +# ## at the same time. +# # token_shared_secret = "" +# # token_username = "" +# +# ## Influx line protocol parser +# ## 'internal' is the default. 'upstream' is a newer parser that is faster +# ## and more memory efficient. +# # parser_type = "internal" + + +# # Generic HTTP write listener +# [[inputs.http_listener_v2]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Paths to listen to. +# # paths = ["/telegraf"] +# +# ## Save path as http_listener_v2_path tag if set to true +# # path_tag = false +# +# ## HTTP methods to accept. +# # methods = ["POST", "PUT"] +# +# ## Optional HTTP headers +# ## These headers are applied to the server that is listening for HTTP +# ## requests and included in responses. +# # http_headers = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Part of the request to consume. Available options are "body" and +# ## "query". +# # data_source = "body" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Minimal TLS version accepted by the server +# # tls_min_version = "TLS12" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Optional setting to map http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.influxdb_listener]] +# ## Address and port to host HTTP listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = 0 +# +# ## Maximum line size allowed to be sent in bytes. +# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored +# # max_line_size = 0 +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional tag name used to store the database name. +# ## If the write has a database in the query string then it will be kept in this tag name. 
+# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# ## If you have a tag that is the same as the one specified below, and supply a database, +# ## the tag will be overwritten with the database supplied. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# +# ## Optional username and password to accept for HTTP basic authentication +# ## or authentication token. +# ## You probably want to make sure you have TLS configured above for this. +# ## Use these options for the authentication token in the form +# ## Authentication: Token : +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Optional JWT token authentication for HTTP requests +# ## Please see the documentation at +# ## https://docs.influxdata.com/influxdb/v1.8/administration/authentication_and_authorization/#authenticate-using-jwt-tokens +# ## for further details. +# ## Please note: Token authentication and basic authentication cannot be used +# ## at the same time. +# # token_shared_secret = "" +# # token_username = "" +# +# ## Influx line protocol parser +# ## 'internal' is the default. 'upstream' is a newer parser that is faster +# ## and more memory efficient. +# # parser_type = "internal" + + +# # Accept metrics over InfluxDB 2.x HTTP API +# [[inputs.influxdb_v2_listener]] +# ## Address and port to host InfluxDB listener on +# ## (Double check the port. Could be 9999 if using OSS Beta) +# service_address = ":8086" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# # max_body_size = "32MiB" +# +# ## Optional tag to determine the bucket. +# ## If the write has a bucket in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # bucket_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional token to accept for HTTP authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # token = "some-long-shared-secret-token" +# +# ## Influx line protocol parser +# ## 'internal' is the default. 'upstream' is a newer parser that is faster +# ## and more memory efficient. +# # parser_type = "internal" + + +# # Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem +# # This plugin ONLY supports Linux on amd64 +# [[inputs.intel_pmu]] +# ## List of filesystem locations of JSON files that contain PMU event definitions. +# event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"] +# +# ## List of core events measurement entities. There can be more than one core_events sections. +# [[inputs.intel_pmu.core_events]] +# ## List of events to be counted. Event names shall match names from event_definitions files. 
+# ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. +# ## If absent, all core events from provided event_definitions are counted skipping unresolvable ones. +# events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"] +# +# ## Limits the counting of events to core numbers specified. +# ## If absent, events are counted on all cores. +# ## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element. +# ## example: cores = ["0,2", "4", "12-16"] +# cores = ["0"] +# +# ## Indicator that plugin shall attempt to run core_events.events as a single perf group. +# ## If absent or set to false, each event is counted individually. Defaults to false. +# ## This limits the number of events that can be measured to a maximum of available hardware counters per core. +# ## Could vary depending on type of event, use of fixed counters. +# # perf_group = false +# +# ## Optionally set a custom tag value that will be added to every measurement within this events group. +# ## Can be applied to any group of events, unrelated to perf_group setting. +# # events_tag = "" +# +# ## List of uncore event measurement entities. There can be more than one uncore_events sections. +# [[inputs.intel_pmu.uncore_events]] +# ## List of events to be counted. Event names shall match names from event_definitions files. +# ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. +# ## If absent, all uncore events from provided event_definitions are counted skipping unresolvable ones. +# events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"] +# +# ## Limits the counting of events to specified sockets. +# ## If absent, events are counted on all sockets. +# ## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element. +# ## example: sockets = ["0-2"] +# sockets = ["0"] +# +# ## Indicator that plugin shall provide an aggregated value for multiple units of same type distributed in an uncore. +# ## If absent or set to false, events for each unit are exposed as separate metric. Defaults to false. +# # aggregate_uncore_units = false +# +# ## Optionally set a custom tag value that will be added to every measurement within this events group. +# # events_tag = "" + + +# # Read Intel RDT metrics +# # This plugin ONLY supports non-Windows +# [[inputs.intel_rdt]] +# ## Optionally set sampling interval to Nx100ms. +# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. +# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. +# # sampling_interval = "10" +# +# ## Optionally specify the path to pqos executable. +# ## If not provided, auto discovery will be performed. +# # pqos_path = "/usr/local/bin/pqos" +# +# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. +# ## If not provided, default value is false. +# # shortened_metrics = false +# +# ## Specify the list of groups of CPU core(s) to be provided as pqos input. +# ## Mandatory if processes aren't set and forbidden if processes are specified. +# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] +# # cores = ["0-3"] +# +# ## Specify the list of processes for which Metrics will be collected. +# ## Mandatory if cores aren't set and forbidden if cores are specified. +# ## e.g. ["qemu", "pmd"] +# # processes = ["process"] +# +# ## Specify if the pqos process should be called with sudo. 
+# ## Mandatory if the telegraf process does not run as root. +# # use_sudo = false + + +# # Subscribe and receive OpenConfig Telemetry data using JTI +# [[inputs.jti_openconfig_telemetry]] +# ## List of device addresses to collect telemetry from +# servers = ["localhost:1883"] +# +# ## Authentication details. Username and password are must if device expects +# ## authentication. Client ID must be unique when connecting from multiple instances +# ## of telegraf to the same device +# username = "user" +# password = "pass" +# client_id = "telegraf" +# +# ## Frequency to get data +# sample_frequency = "1000ms" +# +# ## Sensors to subscribe for +# ## A identifier for each sensor can be provided in path by separating with space +# ## Else sensor path will be used as identifier +# ## When identifier is used, we can provide a list of space separated sensors. +# ## A single subscription will be created with all these sensors and data will +# ## be saved to measurement with this identifier name +# sensors = [ +# "/interfaces/", +# "collection /components/ /lldp", +# ] +# +# ## We allow specifying sensor group level reporting rate. To do this, specify the +# ## reporting rate in Duration at the beginning of sensor paths / collection +# ## name. For entries without reporting rate, we use configured sample frequency +# sensors = [ +# "1000ms customReporting /interfaces /lldp", +# "2000ms collection /components", +# "/interfaces", +# ] +# +# ## Timestamp Source +# ## Set to 'collection' for time of collection, and 'data' for using the time +# ## provided by the _timestamp field. +# # timestamp_source = "collection" +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Minimal TLS version to accept by the client +# # tls_min_version = "TLS12" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. +# ## Failed streams/calls will not be retried if 0 is provided +# retry_delay = "1000ms" +# +# ## Period for sending keep-alive packets on idle connections +# ## This is helpful to identify broken connections to the server +# # keep_alive_period = "10s" +# +# ## To treat all string values as tags, set this to true +# str_as_tags = false + + +# # Read metrics from Kafka topics +# [[inputs.kafka_consumer]] +# ## Kafka brokers. +# brokers = ["localhost:9092"] +# +# ## Topics to consume. +# topics = ["telegraf"] +# +# ## Topic regular expressions to consume. Matches will be added to topics. +# ## Example: topic_regexps = [ "*test", "metric[0-9A-z]*" ] +# # topic_regexps = [ ] +# +# ## When set this tag will be added to all metrics with the topic as the value. +# # topic_tag = "" +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Must be 0.10.2.0 or greater. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Defaults to the OS configuration if not specified or zero. +# # keep_alive_period = "15s" +# +# ## SASL authentication credentials. 
These settings should typically be used +# ## with TLS encryption enabled +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER +# # sasl_access_token = "" +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# # Disable Kafka metadata full fetch +# # metadata_full = false +# +# ## Name of the consumer group. +# # consumer_group = "telegraf_metrics_consumers" +# +# ## Compression codec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD +# # compression_codec = 0 +# ## Initial offset position; one of "oldest" or "newest". +# # offset = "oldest" +# +# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". +# # balance_strategy = "range" +# +# ## Maximum number of retries for metadata operations including +# ## connecting. Sets Sarama library's Metadata.Retry.Max config value. If 0 or +# ## unset, use the Sarama default of 3, +# # metadata_retry_max = 0 +# +# ## Type of retry backoff. Valid options: "constant", "exponential" +# # metadata_retry_type = "constant" +# +# ## Amount of time to wait before retrying. When metadata_retry_type is +# ## "constant", each retry is delayed this amount. When "exponential", the +# ## first retry is delayed this amount, and subsequent delays are doubled. If 0 +# ## or unset, use the Sarama default of 250 ms +# # metadata_retry_backoff = 0 +# +# ## Maximum amount of time to wait before retrying when metadata_retry_type is +# ## "exponential". Ignored for other retry types. If 0, there is no backoff +# ## limit. +# # metadata_retry_max_duration = 0 +# +# ## Strategy for making connection to kafka brokers. Valid options: "startup", +# ## "defer". If set to "defer" the plugin is allowed to start before making a +# ## connection. This is useful if the broker may be down when telegraf is +# ## started, but if there are any typos in the broker setting, they will cause +# ## connection failures without warning at startup +# # connection_strategy = "startup" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 1000000 +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Maximum amount of time the consumer should take to process messages. 
If +# ## the debug log prints messages from sarama about 'abandoning subscription +# ## to [topic] because consuming was taking too long', increase this value to +# ## longer than the time taken by the output plugin(s). +# ## +# ## Note that the effective timeout could be between 'max_processing_time' and +# ## '2 * max_processing_time'. +# # max_processing_time = "100ms" +# +# ## The default number of message bytes to fetch from the broker in each +# ## request (default 1MB). This should be larger than the majority of +# ## your messages, or else the consumer will spend a lot of time +# ## negotiating sizes and not actually consuming. Similar to the JVM's +# ## `fetch.message.max.bytes`. +# # consumer_fetch_default = "1MB" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# ## DEPRECATED: The "kafka_consumer_legacy" plugin is deprecated in version 1.4.0 and will be removed in 1.30.0, use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+. +# # Read metrics from Kafka topic(s) +# [[inputs.kafka_consumer_legacy]] +# ## topic(s) to consume +# topics = ["telegraf"] +# +# ## an array of Zookeeper connection strings +# zookeeper_peers = ["localhost:2181"] +# +# ## Zookeeper Chroot +# zookeeper_chroot = "" +# +# ## the name of the consumer group +# consumer_group = "telegraf_metrics_consumers" +# +# ## Offset (must be either "oldest" or "newest") +# offset = "oldest" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 65536 + + +# # Configuration for the AWS Kinesis input. +# [[inputs.kinesis_consumer]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# +# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) +# # shard_iterator_type = "TRIM_HORIZON" +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. 
This option sets the maximum messages to read from the
+# ## broker that have not been written by an output.
+# ##
+# ## This value needs to be picked with awareness of the agent's
+# ## metric_batch_size value as well. Setting max undelivered messages too high
+# ## can result in a constant stream of data batches to the output. While
+# ## setting it too low may never flush the broker's messages.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ##
+# ## The content encoding of the data from kinesis
+# ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip"
+# ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws
+# ## also base64 encodes the zip byte data before pushing to the stream. The base64 decoding
+# ## is done automatically by the golang sdk, as data is read from kinesis)
+# ##
+# # content_encoding = "identity"
+#
+# ## Optional
+# ## Configuration for a dynamodb checkpoint
+# [inputs.kinesis_consumer.checkpoint_dynamodb]
+# ## unique name for this consumer
+# app_name = "default"
+# table_name = "default"
+
+
+# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface.
+# [[inputs.knx_listener]]
+# ## Type of KNX-IP interface.
+# ## Can be either "tunnel_udp", "tunnel_tcp", "tunnel" (alias for tunnel_udp) or "router".
+# # service_type = "tunnel"
+#
+# ## Address of the KNX-IP interface.
+# service_address = "localhost:3671"
+#
+# ## Measurement definition(s)
+# # [[inputs.knx_listener.measurement]]
+# # ## Name of the measurement
+# # name = "temperature"
+# # ## Datapoint-Type (DPT) of the KNX messages
+# # dpt = "9.001"
+# # ## List of Group-Addresses (GAs) assigned to the measurement
+# # addresses = ["5/5/1"]
+#
+# # [[inputs.knx_listener.measurement]]
+# # name = "illumination"
+# # dpt = "9.004"
+# # addresses = ["5/5/3"]
+
+
+# # Read metrics off Arista LANZ, via socket
+# [[inputs.lanz]]
+# ## URL to Arista LANZ endpoint
+# servers = [
+# "tcp://switch1.int.example.com:50001",
+# "tcp://switch2.int.example.com:50001",
+# ]
+
+
+# ## DEPRECATED: The "logparser" plugin is deprecated in version 1.15.0, use 'inputs.tail' with 'grok' data format instead.
+# # Stream and parse log file(s) with logstash-style "grok" patterns
+# [[inputs.logparser]]
+# ## Log files to parse.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## /var/log/**.log -> recursively find all .log files in /var/log
+# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
+# ## /var/log/apache.log -> only tail the apache log file
+# files = ["/var/log/apache/access.log"]
+#
+# ## Read files that currently exist from the beginning. Files that are created
+# ## while telegraf is running (and that match the "files" globs) will always
+# ## be read from the beginning.
+# from_beginning = false
+#
+# ## Method used to watch for file updates. Can be either "inotify" or "poll".
+# # watch_method = "inotify"
+#
+# ## Parse logstash-style "grok" patterns:
+# [inputs.logparser.grok]
+# ## This is a list of patterns to check the given log file(s) for.
+# ## Note that adding patterns here increases processing time. The most
+# ## efficient configuration is to have one pattern per logparser.
+# ## Other common built-in patterns are: +# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) +# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) +# patterns = ["%{COMBINED_LOG_FORMAT}"] +# +# ## Name of the outputted measurement name. +# measurement = "apache_access_log" +# +# ## Full path(s) to custom pattern files. +# custom_pattern_files = [] +# +# ## Custom patterns can also be defined here. Put one pattern per line. +# custom_patterns = ''' +# ''' +# +# ## Timezone allows you to provide an override for timestamps that +# ## don't already include an offset +# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs +# ## +# ## Default: "" which renders UTC +# ## Options are as follows: +# ## 1. Local -- interpret based on machine localtime +# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones +# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC +# # timezone = "Canada/Eastern" +# +# ## When set to "disable", timestamp will not incremented if there is a +# ## duplicate. +# # unique_timestamp = "auto" + + +# # Read metrics from one or many MongoDB servers +# [[inputs.mongodb]] +# ## An array of URLs of the form: +# ## "mongodb://" [user ":" pass "@"] host [ ":" port] +# ## For example: +# ## mongodb://user:auth_key@10.10.3.30:27017, +# ## mongodb://10.10.3.33:18832, +# ## +# ## If connecting to a cluster, users must include the "?connect=direct" in +# ## the URL to ensure that the connection goes directly to the specified node +# ## and not have all connections passed to the master node. +# servers = ["mongodb://127.0.0.1:27017/?connect=direct"] +# +# ## When true, collect cluster status. +# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which +# ## may have an impact on performance. +# # gather_cluster_status = true +# +# ## When true, collect per database stats +# # gather_perdb_stats = false +# +# ## When true, collect per collection stats +# # gather_col_stats = false +# +# ## When true, collect usage statistics for each collection +# ## (insert, update, queries, remove, getmore, commands etc...). +# # gather_top_stat = false +# +# ## List of db where collections stats are collected +# ## If empty, all db are concerned +# # col_stats_dbs = ["local"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Specifies plugin behavior regarding disconnected servers +# ## Available choices : +# ## - error: telegraf will return an error on startup if one the servers is unreachable +# ## - skip: telegraf will skip unreachable servers on both startup and gather +# # disconnected_servers_behavior = "error" + + +# # Read metrics from MQTT topic(s) +# [[inputs.mqtt_consumer]] +# ## Broker URLs for the MQTT server or cluster. To connect to multiple +# ## clusters or standalone servers, use a separate plugin instance. +# ## example: servers = ["tcp://localhost:1883"] +# ## servers = ["ssl://localhost:1883"] +# ## servers = ["ws://localhost:1883"] +# servers = ["tcp://127.0.0.1:1883"] +# +# ## Topics that will be subscribed to. +# topics = [ +# "telegraf/host01/cpu", +# "telegraf/+/mem", +# "sensors/#", +# ] +# +# ## The message topic will be stored in a tag specified by this value. If set +# ## to the empty string no topic tag will be created. 
+# # topic_tag = "topic" +# +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# ## +# ## When using a QoS of 1 or 2, you should enable persistent_session to allow +# ## resuming unacknowledged messages. +# # qos = 0 +# +# ## Connection timeout for initial connection in seconds +# # connection_timeout = "30s" +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Persistent session disables clearing of the client session on connection. +# ## In order for this option to work you must also set client_id to identify +# ## the client. To receive messages that arrived while the client is offline, +# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when +# ## publishing. +# # persistent_session = false +# +# ## If unset, a random client ID will be generated. +# # client_id = "" +# +# ## Username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Client trace messages +# ## When set to true, and debug mode enabled in the agent settings, the MQTT +# ## client's messages are included in telegraf logs. These messages are very +# ## noisey, but essential for debugging issues. +# # client_trace = false +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Enable extracting tag values from MQTT topics +# ## _ denotes an ignored entry in the topic path +# # [[inputs.mqtt_consumer.topic_parsing]] +# # topic = "" +# # measurement = "" +# # tags = "" +# # fields = "" +# ## Value supported is int, float, unit +# # [[inputs.mqtt_consumer.topic.types]] +# # key = type + + +# # Read metrics from NATS subject(s) +# [[inputs.nats_consumer]] +# ## urls of NATS servers +# servers = ["nats://localhost:4222"] +# +# ## subject(s) to consume +# ## If you use jetstream you need to set the subjects +# ## in jetstream_subjects +# subjects = ["telegraf"] +# +# ## jetstream subjects +# ## jetstream is a streaming technology inside of nats. +# ## With jetstream the nats-server persists messages and +# ## a consumer can consume historical messages. This is +# ## useful when telegraf needs to restart it don't miss a +# ## message. You need to configure the nats-server. +# ## https://docs.nats.io/nats-concepts/jetstream. 
+# jetstream_subjects = ["js_telegraf"] +# +# ## name a queue group +# queue_group = "telegraf_consumers" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Sets the limits for pending msgs and bytes for each subscription +# ## These shouldn't need to be adjusted except in very high throughput scenarios +# # pending_message_limit = 65536 +# # pending_bytes_limit = 67108864 +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Netflow v5, Netflow v9 and IPFIX collector +# [[inputs.netflow]] +# ## Address to listen for netflow,ipfix or sflow packets. +# ## example: service_address = "udp://:2055" +# ## service_address = "udp4://:2055" +# ## service_address = "udp6://:2055" +# service_address = "udp://:2055" +# +# ## Set the size of the operating system's receive buffer. +# ## example: read_buffer_size = "64KiB" +# ## Uses the system's default if not set. +# # read_buffer_size = "" +# +# ## Protocol version to use for decoding. +# ## Available options are +# ## "ipfix" -- IPFIX / Netflow v10 protocol (also works for Netflow v9) +# ## "netflow v5" -- Netflow v5 protocol +# ## "netflow v9" -- Netflow v9 protocol (also works for IPFIX) +# ## "sflow v5" -- sFlow v5 protocol +# # protocol = "ipfix" +# +# ## Private Enterprise Numbers (PEN) mappings for decoding +# ## This option allows to specify vendor-specific mapping files to use during +# ## decoding. +# # private_enterprise_number_files = [] +# +# ## Dump incoming packets to the log +# ## This can be helpful to debug parsing issues. Only active if +# ## Telegraf is in debug mode. +# # dump_packets = false + + +# # Read metrics from NSQD topic(s) +# [[inputs.nsq_consumer]] +# ## Server option still works but is deprecated, we just prepend it to the nsqd array. +# # server = "localhost:4150" +# +# ## An array representing the NSQD TCP HTTP Endpoints +# nsqd = ["localhost:4150"] +# +# ## An array representing the NSQLookupd HTTP Endpoints +# nsqlookupd = ["localhost:4161"] +# topic = "telegraf" +# channel = "consumer" +# max_in_flight = 100 +# +# ## Max undelivered messages +# ## This plugin uses tracking metrics, which ensure messages are read to +# ## outputs before acknowledging them to the original broker to ensure data +# ## is not lost. 
This option sets the maximum messages to read from the +# ## broker that have not been written by an output. +# ## +# ## This value needs to be picked with awareness of the agent's +# ## metric_batch_size value as well. Setting max undelivered messages too high +# ## can result in a constant stream of data batches to the output. While +# ## setting it too low may never flush the broker's messages. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Retrieve data from OPCUA devices +# [[inputs.opcua_listener]] +# ## Metric name +# # name = "opcua_listener" +# # +# ## OPC UA Endpoint URL +# # endpoint = "opc.tcp://localhost:4840" +# # +# ## Maximum time allowed to establish a connect to the endpoint. +# # connect_timeout = "10s" +# # +# ## Maximum time allowed for a request over the established connection. +# # request_timeout = "5s" +# # +# ## The interval at which the server should at least update its monitored items +# # subscription_interval = "100ms" +# # +# ## Security policy, one of "None", "Basic128Rsa15", "Basic256", +# ## "Basic256Sha256", or "auto" +# # security_policy = "auto" +# # +# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto" +# # security_mode = "auto" +# # +# ## Path to cert.pem. Required when security mode or policy isn't "None". +# ## If cert path is not supplied, self-signed cert and key will be generated. +# # certificate = "/etc/telegraf/cert.pem" +# # +# ## Path to private key.pem. Required when security mode or policy isn't "None". +# ## If key path is not supplied, self-signed cert and key will be generated. +# # private_key = "/etc/telegraf/key.pem" +# # +# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To +# ## authenticate using a specific ID, select 'Certificate' or 'UserName' +# # auth_method = "Anonymous" +# # +# ## Username. Required for auth_method = "UserName" +# # username = "" +# # +# ## Password. Required for auth_method = "UserName" +# # password = "" +# # +# ## Option to select the metric timestamp to use. Valid options are: +# ## "gather" -- uses the time of receiving the data in telegraf +# ## "server" -- uses the timestamp provided by the server +# ## "source" -- uses the timestamp provided by the source +# # timestamp = "gather" +# # +# ## The default timetsamp format is RFC3339Nano +# # Other timestamp layouts can be configured using the Go language time +# # layout specification from https://golang.org/pkg/time/#Time.Format +# # e.g.: json_timestamp_format = "2006-01-02T15:04:05Z07:00" +# #timestamp_format = "" +# # +# ## Node ID configuration +# ## name - field name to use in the output +# ## namespace - OPC UA namespace of the node (integer value 0 thru 3) +# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) +# ## identifier - OPC UA ID (tag as shown in opcua browser) +# ## default_tags - extra tags to be added to the output metric (optional) +# ## +# ## Use either the inline notation or the bracketed notation, not both. 
+# # +# ## Inline notation (default_tags not supported yet) +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier=""}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# # ] +# # +# ## Bracketed notation +# # [[inputs.opcua_listener.nodes]] +# # name = "node1" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# # default_tags = { tag1 = "value1", tag2 = "value2" } +# # +# # [[inputs.opcua_listener.nodes]] +# # name = "node2" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# # +# ## Node Group +# ## Sets defaults so they aren't required in every node. +# ## Default values can be set for: +# ## * Metric name +# ## * OPC UA namespace +# ## * Identifier +# ## * Default tags +# ## +# ## Multiple node groups are allowed +# #[[inputs.opcua_listener.group]] +# ## Group Metric name. Overrides the top level name. If unset, the +# ## top level name is used. +# # name = +# # +# ## Group default namespace. If a node in the group doesn't set its +# ## namespace, this is used. +# # namespace = +# # +# ## Group default identifier type. If a node in the group doesn't set its +# ## namespace, this is used. +# # identifier_type = +# # +# ## Default tags that are applied to every node in this group. Can be +# ## overwritten in a node by setting a different value for the tag name. +# ## example: default_tags = { tag1 = "value1" } +# # default_tags = {} +# # +# ## Node ID Configuration. Array of nodes with the same settings as above. +# ## Use either the inline notation or the bracketed notation, not both. +# # +# ## Inline notation (default_tags not supported yet) +# # nodes = [ +# # {name="node1", namespace="", identifier_type="", identifier=""}, +# # {name="node2", namespace="", identifier_type="", identifier=""}, +# #] +# # +# ## Bracketed notation +# # [[inputs.opcua_listener.group.nodes]] +# # name = "node1" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# # default_tags = { tag1 = "override1", tag2 = "value2" } +# # +# # [[inputs.opcua_listener.group.nodes]] +# # name = "node2" +# # namespace = "" +# # identifier_type = "" +# # identifier = "" +# +# ## Enable workarounds required by some devices to work correctly +# # [inputs.opcua_listener.workarounds] +# ## Set additional valid status codes, StatusOK (0x0) is always considered valid +# # additional_valid_status_codes = ["0xC0"] +# +# # [inputs.opcua_listener.request_workarounds] +# ## Use unregistered reads instead of registered reads +# # use_unregistered_reads = false + + +# # Collects performance metrics from OpenStack services +# [[inputs.openstack]] +# ## The recommended interval to poll is '30m' +# +# ## The identity endpoint to authenticate against and get the service catalog from. +# authentication_endpoint = "https://my.openstack.cloud:5000" +# +# ## The domain to authenticate against when using a V3 identity endpoint. +# # domain = "default" +# +# ## The project to authenticate as. +# # project = "admin" +# +# ## User authentication credentials. Must have admin rights. 
+# username = "admin" +# password = "password" +# +# ## Available services are: +# ## "agents", "aggregates", "cinder_services", "flavors", "hypervisors", "networks", +# ## "nova_services", "ports", "projects", "servers", "services", "stacks", "storage_pools", +# ## "subnets", "volumes" +# # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] +# +# ## Collect Server Diagnostics +# # server_diagnotics = false +# +# ## output secrets (such as adminPass(for server) and UserID(for volume)). +# # output_secrets = false +# +# ## Amount of time allowed to complete the HTTP(s) request. +# # timeout = "5s" +# +# ## HTTP Proxy support +# # http_proxy_url = "" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Options for tags received from Openstack +# # tag_prefix = "openstack_tag_" +# # tag_value = "true" +# +# ## Timestamp format for timestamp data received from Openstack. +# ## If false format is unix nanoseconds. +# # human_readable_timestamps = false +# +# ## Measure Openstack call duration +# # measure_openstack_requests = false + + +# # Receive OpenTelemetry traces, metrics, and logs over gRPC +# [[inputs.opentelemetry]] +# ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service +# ## address:port +# # service_address = "0.0.0.0:4317" +# +# ## Override the default (5s) new connection timeout +# # timeout = "5s" +# +# ## Override the default span attributes to be used as line protocol tags. +# ## These are always included as tags: +# ## - trace ID +# ## - span ID +# ## The default values are strongly recommended for use with Jaeger: +# ## - service.name +# ## - span.name +# ## Other common attributes can be found here: +# ## - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv +# # span_dimensions = ["service.name", "span.name"] +# +# ## Override the default log record attributes to be used as line protocol tags. +# ## These are always included as tags, if available: +# ## - trace ID +# ## - span ID +# ## The default values: +# ## - service.name +# ## Other common attributes can be found here: +# ## - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv +# ## When using InfluxDB for both logs and traces, be certain that log_record_dimensions +# ## matches the span_dimensions value. +# # log_record_dimensions = ["service.name"] +# +# ## Override the default (prometheus-v1) metrics schema. +# ## Supports: "prometheus-v1", "prometheus-v2" +# ## For more information about the alternatives, read the Prometheus input +# ## plugin notes. +# # metrics_schema = "prometheus-v1" +# +# ## Optional TLS Config. +# ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md +# ## +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# ## Add service certificate and key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Read metrics from one or many pgbouncer servers +# [[inputs.pgbouncer]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@host:port[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost port=5432 user=pqgotest password=... sslmode=... 
dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# address = "host=localhost user=pgbouncer sslmode=disable" +# +# ## Specify which "show" commands to gather metrics for. +# ## Choose from: "stats", "pools", "lists", "databases" +# # show_commands = ["stats", "pools"] + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql]] +# ## Specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]&statement_timeout=... +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# ## Users can pass the path to the socket as the host value to use a socket +# ## connection (e.g. `/var/run/postgresql`). +# ## +# ## All connection parameters are optional. +# ## +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# ## +# address = "host=localhost user=postgres sslmode=disable" +# +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # outputaddress = "db01" +# +# ## connection configuration. +# ## maxlifetime - specify the maximum lifetime of a connection. +# ## default is forever (0s) +# ## +# ## Note that this does not interrupt queries, the lifetime will not be enforced +# ## whilst a query is running +# # max_lifetime = "0s" +# +# ## A list of databases to explicitly ignore. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'databases' option. +# # ignored_databases = ["postgres", "template0", "template1"] +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'ignored_databases' option. +# # databases = ["app_production", "testing"] +# +# ## Whether to use prepared statements when connecting to the database. +# ## This should be set to false when connecting through a PgBouncer instance +# ## with pool_mode set to transaction. +# prepared_statements = true + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql_extensible]] +# # specify address via a url matching: +# # postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=...&statement_timeout=... +# # or a simple string: +# # host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production +# # +# # All connection parameters are optional. +# # Without the dbname parameter, the driver will default to a database +# # with the same name as the user. This dbname is just for instantiating a +# # connection with the server and doesn't restrict the databases we are trying +# # to grab metrics for. +# # +# address = "host=localhost user=postgres sslmode=disable" +# +# ## A list of databases to pull metrics about. +# ## deprecated in 1.22.3; use the sqlquery option to specify database to use +# # databases = ["app_production", "testing"] +# +# ## Whether to use prepared statements when connecting to the database. +# ## This should be set to false when connecting through a PgBouncer instance +# ## with pool_mode set to transaction. 
+# prepared_statements = true +# +# # Define the toml config where the sql queries are stored +# # The script option can be used to specify the .sql file path. +# # If script and sqlquery options specified at same time, sqlquery will be used +# # +# # the measurement field defines measurement name for metrics produced +# # by the query. Default is "postgresql". +# # +# # the tagvalue field is used to define custom tags (separated by comas). +# # the query is expected to return columns which match the names of the +# # defined tags. The values in these columns must be of a string-type, +# # a number-type or a blob-type. +# # +# # The timestamp field is used to override the data points timestamp value. By +# # default, all rows inserted with current time. By setting a timestamp column, +# # the row will be inserted with that column's value. +# # +# # The min_version field specifies minimal database version this query +# # will run on. +# # +# # The max_version field when set specifies maximal database version +# # this query will NOT run on. +# # +# # Database version in `minversion` and `maxversion` is represented as +# # a single integer without last component, for example: +# # 9.6.2 -> 906 +# # 15.2 -> 1500 +# # +# # Structure : +# # [[inputs.postgresql_extensible.query]] +# # measurement string +# # sqlquery string +# # min_version int +# # max_version int +# # withdbname boolean +# # tagvalue string (coma separated) +# # timestamp string +# [[inputs.postgresql_extensible.query]] +# measurement="pg_stat_database" +# sqlquery="SELECT * FROM pg_stat_database where datname" +# min_version=901 +# tagvalue="" +# [[inputs.postgresql_extensible.query]] +# script="your_sql-filepath.sql" +# min_version=901 +# max_version=1300 +# tagvalue="" + + +# # Read metrics from one or many prometheus clients +# [[inputs.prometheus]] +# ## An array of urls to scrape metrics from. +# urls = ["http://localhost:9100/metrics"] +# +# ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics. +# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details. +# ## Valid options: 1, 2 +# # metric_version = 1 +# +# ## Url tag name (tag containing scrapped url. optional, default is "url") +# # url_tag = "url" +# +# ## Whether the timestamp of the scraped metrics will be ignored. +# ## If set to true, the gather time will be used. +# # ignore_timestamp = false +# +# ## An array of Kubernetes services to scrape metrics from. +# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] +# +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" +# +# ## Scrape Pods +# ## Enable scraping of k8s pods. Further settings as to which pods to scape +# ## are determiend by the 'method' option below. When enabled, the default is +# ## to use annotations to determine whether to scrape or not. +# # monitor_kubernetes_pods = false +# +# ## Scrape Pods Method +# ## annotations: default, looks for specific pod annotations documented below +# ## settings: only look for pods matching the settings provided, not +# ## annotations +# ## settings+annotations: looks at pods that match annotations using the user +# ## defined settings +# # monitor_kubernetes_pods_method = "annotations" +# +# ## Scrape Pods 'annotations' method options +# ## If set method is set to 'annotations' or 'settings+annotations', these +# ## annotation flags are looked for: +# ## - prometheus.io/scrape: Required to enable scraping for this pod. 
Can also +# ## use 'prometheus.io/scrape=false' annotation to opt-out entirely. +# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will +# ## need to set this to 'https' & most likely set the tls config +# ## - prometheus.io/path: If the metrics path is not /metrics, define it with +# ## this annotation +# ## - prometheus.io/port: If port is not 9102 use this annotation +# +# ## Scrape Pods 'settings' method options +# ## When using 'settings' or 'settings+annotations', the default values for +# ## annotations can be modified using with the following options: +# # monitor_kubernetes_pods_scheme = "http" +# # monitor_kubernetes_pods_port = "9102" +# # monitor_kubernetes_pods_path = "/metrics" +# +# ## Get the list of pods to scrape with either the scope of +# ## - cluster: the kubernetes watch api (default, no need to specify) +# ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. +# # pod_scrape_scope = "cluster" +# +# ## Only for node scrape scope: node IP of the node that telegraf is running on. +# ## Either this config or the environment variable NODE_IP must be set. +# # node_ip = "10.180.1.1" +# +# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. +# ## Default is 60 seconds. +# # pod_scrape_interval = 60 +# +# ## Restricts Kubernetes monitoring to a single namespace +# ## ex: monitor_kubernetes_pods_namespace = "default" +# # monitor_kubernetes_pods_namespace = "" +# ## The name of the label for the pod that is being scraped. +# ## Default is 'namespace' but this can conflict with metrics that have the label 'namespace' +# # pod_namespace_label_name = "namespace" +# # label selector to target pods which have the label +# # kubernetes_label_selector = "env=dev,app=nginx" +# # field selector to target pods +# # eg. To scrape pods on a specific node +# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" +# +# ## Filter which pod annotations and labels will be added to metric tags +# # +# # pod_annotation_include = ["annotation-key-1"] +# # pod_annotation_exclude = ["exclude-me"] +# # pod_label_include = ["label-key-1"] +# # pod_label_exclude = ["exclude-me"] +# +# # cache refresh interval to set the interval for re-sync of pods list. +# # Default is 60 minutes. +# # cache_refresh_interval = 60 +# +# ## Scrape Services available in Consul Catalog +# # [inputs.prometheus.consul] +# # enabled = true +# # agent = "http://localhost:8500" +# # query_interval = "5m" +# +# # [[inputs.prometheus.consul.query]] +# # name = "a service name" +# # tag = "a service tag" +# # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}' +# # [inputs.prometheus.consul.query.tags] +# # host = "{{.Node}}" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## HTTP Basic Authentication username and password. 
('bearer_token' and +# ## 'bearer_token_string' take priority) +# # username = "" +# # password = "" +# +# ## Optional custom HTTP headers +# # http_headers = {"X-Special-Header" = "Special-Value"} +# +# ## Specify timeout duration for slower prometheus clients (default is 5s) +# # timeout = "5s" +# +# ## deprecated in 1.26; use the timeout option +# # response_timeout = "5s" +# +# ## HTTP Proxy support +# # use_system_proxy = false +# # http_proxy_url = "" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Use the given name as the SNI server name on each URL +# # tls_server_name = "myhost.example.org" +# +# ## TLS renegotiation method, choose from "never", "once", "freely" +# # tls_renegotiation_method = "never" +# +# ## Enable/disable TLS +# ## Set to true/false to enforce TLS being enabled/disabled. If not set, +# ## enable TLS only if any of the other options are specified. +# # tls_enable = true +# +# ## Control pod scraping based on pod namespace annotations +# ## Pass and drop here act like tagpass and tagdrop, but instead +# ## of filtering metrics they filters pod candidates for scraping +# #[inputs.prometheus.namespace_annotation_pass] +# # annotation_key = ["value1", "value2"] +# #[inputs.prometheus.namespace_annotation_drop] +# # some_annotation_key = ["dont-scrape"] + + +# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required). +# # This plugin ONLY supports Linux on 386, amd64, arm, and arm64 +# [[inputs.ras]] +# ## Optional path to RASDaemon sqlite3 database. +# ## Default: /var/lib/rasdaemon/ras-mc_event.db +# # db_path = "" + + +# # Read metrics from one or many redis servers +# [[inputs.redis]] +# ## specify servers via a url matching: +# ## [protocol://][username:password]@address[:port] +# ## e.g. +# ## tcp://localhost:6379 +# ## tcp://username:password@192.168.99.100 +# ## unix:///var/run/redis.sock +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 6379 is used +# servers = ["tcp://localhost:6379"] +# +# ## Optional. Specify redis commands to retrieve values +# # [[inputs.redis.commands]] +# # # The command to run where each argument is a separate element +# # command = ["get", "sample-key"] +# # # The field to store the result in +# # field = "sample-key-value" +# # # The type of the result +# # # Can be "string", "integer", or "float" +# # type = "string" +# +# ## Specify username and password for ACL auth (Redis 6.0+). You can add this +# ## to the server URI above or specify it here. The values here take +# ## precidence. +# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Riemann protobuff listener +# [[inputs.riemann_listener]] +# ## URL to listen on +# ## Default is "tcp://:5555" +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# +# ## Maximum number of concurrent connections. +# ## 0 (default) is unlimited. +# # max_connections = 1024 +# ## Read timeout. +# ## 0 (default) is unlimited. 
+# # read_timeout = "30s"
+# ## Optional TLS configuration.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Enables client authentication if set.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+# ## Maximum socket buffer size (in bytes when no unit specified).
+# # read_buffer_size = "64KiB"
+# ## Period between keep alive probes.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+
+
+# # Plugin for retrieving data from Siemens PLCs via the S7 protocol (RFC1006)
+# [[inputs.s7comm]]
+# ## Parameters to contact the PLC (mandatory)
+# ## The server is in the <host>[:port] format where the port defaults to 102
+# ## if not explicitly specified.
+# server = "127.0.0.1:102"
+# rack = 0
+# slot = 0
+#
+# ## Timeout for requests
+# # timeout = "10s"
+#
+# ## Log detailed connection messages for debugging
+# ## This option only has an effect when Telegraf runs in debug mode
+# # debug_connection = false
+#
+# ## Metric definition(s)
+# [[inputs.s7comm.metric]]
+# ## Name of the measurement
+# # name = "s7comm"
+#
+# ## Field definitions
+# ## name - field name
+# ## address - indirect address "<area>.<type><address>
[.extra]" +# ## area - e.g. be "DB1" for data-block one +# ## type - supported types are (uppercase) +# ## X -- bit, requires the bit-number as 'extra' +# ## parameter +# ## B -- byte (8 bit) +# ## C -- character (8 bit) +# ## W -- word (16 bit) +# ## DW -- double word (32 bit) +# ## I -- integer (16 bit) +# ## DI -- double integer (32 bit) +# ## R -- IEEE 754 real floating point number (32 bit) +# ## DT -- date-time, always converted to unix timestamp +# ## with nano-second precision +# ## S -- string, requires the maximum length of the +# ## string as 'extra' parameter +# ## address - start address to read if not specified otherwise +# ## in the type field +# ## extra - extra parameter e.g. for the bit and string type +# fields = [ +# { name="rpm", address="DB1.R4" }, +# { name="status_ok", address="DB1.X2.1" }, +# { name="last_error", address="DB2.S1.32" }, +# { name="last_error_time", address="DB2.DT2" } +# ] +# +# ## Tags assigned to the metric +# # [inputs.s7comm.metric.tags] +# # device = "compressor" +# # location = "main building" + + +# # SFlow V5 Protocol Listener +# [[inputs.sflow]] +# ## Address to listen for sFlow packets. +# ## example: service_address = "udp://:6343" +# ## service_address = "udp4://:6343" +# ## service_address = "udp6://:6343" +# service_address = "udp://:6343" +# +# ## Set the size of the operating system's receive buffer. +# ## example: read_buffer_size = "64KiB" +# # read_buffer_size = "" + + +# # Receive SNMP traps +# [[inputs.snmp_trap]] +# ## Transport, local address, and port to listen on. Transport must +# ## be "udp://". Omit local address to listen on all interfaces. +# ## example: "udp://127.0.0.1:1234" +# ## +# ## Special permissions may be required to listen on a port less than +# ## 1024. See README.md for details +# ## +# # service_address = "udp://:162" +# ## +# ## Path to mib files +# ## Used by the gosmi translator. +# ## To add paths when translating with netsnmp, use the MIBDIRS environment variable +# # path = ["/usr/share/snmp/mibs"] +# ## +# ## Deprecated in 1.20.0; no longer running snmptranslate +# ## Timeout running snmptranslate command +# # timeout = "5s" +# ## Snmp version +# # version = "2c" +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA" or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" + + +# # Generic socket listener capable of handling multiple socket types. +# [[inputs.socket_listener]] +# ## URL to listen on +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# # service_address = "udp://:8094" +# # service_address = "udp4://:8094" +# # service_address = "udp6://:8094" +# # service_address = "unix:///tmp/telegraf.sock" +# # service_address = "unixgram:///tmp/telegraf.sock" +# +# ## Change the file mode bits on unix sockets. 
These permissions may not be
+# ## respected by some platforms, to safely restrict write permissions it is best
+# ## to place the socket into a directory that has previously been created
+# ## with the desired permissions.
+# ## ex: socket_mode = "777"
+# # socket_mode = ""
+#
+# ## Maximum number of concurrent connections.
+# ## Only applies to stream sockets (e.g. TCP).
+# ## 0 (default) is unlimited.
+# # max_connections = 1024
+#
+# ## Read timeout.
+# ## Only applies to stream sockets (e.g. TCP).
+# ## 0 (default) is unlimited.
+# # read_timeout = "30s"
+#
+# ## Optional TLS configuration.
+# ## Only applies to stream sockets (e.g. TCP).
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Enables client authentication if set.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Maximum socket buffer size (in bytes when no unit specified).
+# ## For stream sockets, once the buffer fills up, the sender will start backing up.
+# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
+# ## Defaults to the OS default.
+# # read_buffer_size = "64KiB"
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+#
+# ## Content encoding for message payloads, can be set to "gzip" or
+# ## "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Maximum size of decoded packet.
+# ## Acceptable units are B, KiB, KB, MiB, MB...
+# ## Without quotes and units, interpreted as size in bytes.
+# # max_decompression_size = "500MB"
+#
+# ## Message splitting strategy and corresponding settings for stream sockets
+# ## (tcp, tcp4, tcp6, unix or unixpacket). The setting is ignored for packet
+# ## listeners such as udp.
+# ## Available strategies are:
+# ## newline -- split at newlines (default)
+# ## null -- split at null bytes
+# ## delimiter -- split at delimiter byte-sequence in hex-format
+# ## given in `splitting_delimiter`
+# ## fixed length -- split after number of bytes given in `splitting_length`
+# ## variable length -- split depending on length information received in the
+# ## data. The length field information is specified in
+# ## `splitting_length_field`.
+# # splitting_strategy = "newline"
+#
+# ## Delimiter used to split received data to messages consumed by the parser.
+# ## The delimiter is a hex byte-sequence marking the end of a message
+# ## e.g. "0x0D0A", "x0d0a" or "0d0a" marks a Windows line-break (CR LF).
+# ## The value is case-insensitive and can be specified with "0x" or "x" prefix
+# ## or without it.
+# ## Note: This setting is only used for splitting_strategy = "delimiter".
+# # splitting_delimiter = ""
+#
+# ## Fixed length of a message in bytes.
+# ## Note: This setting is only used for splitting_strategy = "fixed length".
+# # splitting_length = 0
+#
+# ## Specification of the length field contained in the data to split messages
+# ## with variable length.
The specification contains the following fields: +# ## offset -- start of length field in bytes from begin of data +# ## bytes -- length of length field in bytes +# ## endianness -- endianness of the value, either "be" for big endian or +# ## "le" for little endian +# ## header_length -- total length of header to be skipped when passing +# ## data on to the parser. If zero (default), the header +# ## is passed on to the parser together with the message. +# ## Note: This setting is only used for splitting_strategy = "variable length". +# # splitting_length_field = {offset = 0, bytes = 0, endianness = "be", header_length = 0} + + +# # Read stats from one or more Solr servers or cores +# [[inputs.solr]] +# ## specify a list of one or more Solr servers +# servers = ["http://localhost:8983"] +# +# ## specify a list of one or more Solr cores (default - all) +# # cores = ["*"] +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Timeout for HTTP requests +# # timeout = "5s" + + +# # Read metrics from SQL queries +# [[inputs.sql]] +# ## Database Driver +# ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for +# ## a list of supported drivers. +# driver = "mysql" +# +# ## Data source name for connecting +# ## The syntax and supported options depends on selected driver. +# dsn = "username:password@mysqlserver:3307/dbname?param=value" +# +# ## Timeout for any operation +# ## Note that the timeout for queries is per query not per gather. +# # timeout = "5s" +# +# ## Connection time limits +# ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections +# ## will not be closed automatically. If you specify a positive time, the connections will be closed after +# ## idleing or existing for at least that amount of time, respectively. +# # connection_max_idle_time = "0s" +# # connection_max_life_time = "0s" +# +# ## Connection count limits +# ## By default the number of open connections is not limited and the number of maximum idle connections +# ## will be inferred from the number of queries specified. If you specify a positive number for any of the +# ## two options, connections will be closed when reaching the specified limit. The number of idle connections +# ## will be clipped to the maximum number of connections limit if any. +# # connection_max_open = 0 +# # connection_max_idle = auto +# +# ## Specifies plugin behavior regarding disconnected servers +# ## Available choices : +# ## - error: telegraf will return an error on startup if one the servers is unreachable +# ## - ignore: telegraf will ignore unreachable servers on both startup and gather +# # disconnected_servers_behavior = "error" +# +# [[inputs.sql.query]] +# ## Query to perform on the server +# query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0" +# ## Alternatively to specifying the query directly you can select a file here containing the SQL query. +# ## Only one of 'query' and 'query_script' can be specified! +# # query_script = "/path/to/sql/script.sql" +# +# ## Name of the measurement +# ## In case both measurement and 'measurement_col' are given, the latter takes precedence. +# # measurement = "sql" +# +# ## Column name containing the name of the measurement +# ## If given, this will take precedence over the 'measurement' setting. In case a query result +# ## does not contain the specified column, we fall-back to the 'measurement' setting. 
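+# ## Illustrative example (not part of the upstream sample; column name assumed):
+# ## with measurement_column = "machine", a row whose "machine" column contains
+# ## "press01" is emitted under the measurement name "press01".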
+# # measurement_column = "" +# +# ## Column name containing the time of the measurement +# ## If ommited, the time of the query will be used. +# # time_column = "" +# +# ## Format of the time contained in 'time_col' +# ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. +# ## See https://golang.org/pkg/time/#Time.Format for details. +# # time_format = "unix" +# +# ## Column names containing tags +# ## An empty include list will reject all columns and an empty exclude list will not exclude any column. +# ## I.e. by default no columns will be returned as tag and the tags are empty. +# # tag_columns_include = [] +# # tag_columns_exclude = [] +# +# ## Column names containing fields (explicit types) +# ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over +# ## the automatic (driver-based) conversion below. +# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. +# # field_columns_float = [] +# # field_columns_int = [] +# # field_columns_uint = [] +# # field_columns_bool = [] +# # field_columns_string = [] +# +# ## Column names containing fields (automatic types) +# ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty +# ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. +# ## NOTE: We rely on the database driver to perform automatic datatype conversion. +# # field_columns_include = [] +# # field_columns_exclude = [] + + +# # Read metrics from Microsoft SQL Server +# [[inputs.sqlserver]] +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. +# ## for Windows, the user is the currently running AD user (SSO). +# ## See https://github.com/microsoft/go-mssqldb for detailed connection +# ## parameters, in particular, tls connections can be created like so: +# ## "encrypt=true;certificate=;hostNameInCertificate=" +# servers = [ +# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# ] +# +# ## Timeout for query execution operation +# ## Note that the timeout for queries is per query not per gather. +# ## 0 value means no timeout +# # query_timeout = "0s" +# +# ## Authentication method +# ## valid methods: "connection_string", "AAD" +# # auth_method = "connection_string" +# +# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 +# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. +# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" +# database_type = "SQLServer" +# +# ## A list of queries to include. If not specified, all the below listed queries are used. +# include_query = [] +# +# ## A list of queries to explicitly ignore. 
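+# ## Note (interpretation, for illustration only): the two queries excluded below
+# ## report Always On availability-group replica state and are typically only
+# ## meaningful when availability groups are configured.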
+# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] +# +# ## Queries enabled by default for database_type = "SQLServer" are - +# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, +# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates, +# ## SQLServerRecentBackups +# +# ## Queries enabled by default for database_type = "AzureSQLDB" are - +# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers +# +# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers +# +# ## Queries enabled by default for database_type = "AzureSQLPool" are - +# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats, +# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers +# +# ## Queries enabled by default for database_type = "AzureArcSQLManagedInstance" are - +# ## AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers +# +# ## Following are old config settings +# ## You may use them only if you are using the earlier flavor of queries, however it is recommended to use +# ## the new mechanism of identifying the database_type there by use it's corresponding queries +# +# ## Optional parameter, setting this to 2 will use a new version +# ## of the collection queries that break compatibility with the original +# ## dashboards. +# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB +# # query_version = 2 +# +# ## If you are using AzureDB, setting this to true will gather resource utilization metrics +# # azuredb = false +# +# ## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health". +# ## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers". +# ## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues. +# ## This setting/metric is optional and is disabled by default. 
+# # health_metric = false +# +# ## Possible queries accross different versions of the collectors +# ## Queries enabled by default for specific Database Type +# +# ## database_type = AzureSQLDB by default collects the following queries +# ## - AzureSQLDBWaitStats +# ## - AzureSQLDBResourceStats +# ## - AzureSQLDBResourceGovernance +# ## - AzureSQLDBDatabaseIO +# ## - AzureSQLDBServerProperties +# ## - AzureSQLDBOsWaitstats +# ## - AzureSQLDBMemoryClerks +# ## - AzureSQLDBPerformanceCounters +# ## - AzureSQLDBRequests +# ## - AzureSQLDBSchedulers +# +# ## database_type = AzureSQLManagedInstance by default collects the following queries +# ## - AzureSQLMIResourceStats +# ## - AzureSQLMIResourceGovernance +# ## - AzureSQLMIDatabaseIO +# ## - AzureSQLMIServerProperties +# ## - AzureSQLMIOsWaitstats +# ## - AzureSQLMIMemoryClerks +# ## - AzureSQLMIPerformanceCounters +# ## - AzureSQLMIRequests +# ## - AzureSQLMISchedulers +# +# ## database_type = AzureSQLPool by default collects the following queries +# ## - AzureSQLPoolResourceStats +# ## - AzureSQLPoolResourceGovernance +# ## - AzureSQLPoolDatabaseIO +# ## - AzureSQLPoolOsWaitStats, +# ## - AzureSQLPoolMemoryClerks +# ## - AzureSQLPoolPerformanceCounters +# ## - AzureSQLPoolSchedulers +# +# ## database_type = SQLServer by default collects the following queries +# ## - SQLServerPerformanceCounters +# ## - SQLServerWaitStatsCategorized +# ## - SQLServerDatabaseIO +# ## - SQLServerProperties +# ## - SQLServerMemoryClerks +# ## - SQLServerSchedulers +# ## - SQLServerRequests +# ## - SQLServerVolumeSpace +# ## - SQLServerCpu +# ## - SQLServerRecentBackups +# ## and following as optional (if mentioned in the include_query list) +# ## - SQLServerAvailabilityReplicaStates +# ## - SQLServerDatabaseReplicaStates +# +# ## Version 2 by default collects the following queries +# ## Version 2 is being deprecated, please consider using database_type. +# ## - PerformanceCounters +# ## - WaitStatsCategorized +# ## - DatabaseIO +# ## - ServerProperties +# ## - MemoryClerk +# ## - Schedulers +# ## - SqlRequests +# ## - VolumeSpace +# ## - Cpu +# +# ## Version 1 by default collects the following queries +# ## Version 1 is deprecated, please consider using database_type. +# ## - PerformanceCounters +# ## - WaitStatsCategorized +# ## - CPUHistory +# ## - DatabaseIO +# ## - DatabaseSize +# ## - DatabaseStats +# ## - DatabaseProperties +# ## - MemoryClerk +# ## - VolumeSpace +# ## - PerformanceMetrics + + +# # Statsd Server +# [[inputs.statsd]] +# ## Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp) +# protocol = "udp" +# +# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) +# max_tcp_connections = 250 +# +# ## Enable TCP keep alive probes (default=false) +# tcp_keep_alive = false +# +# ## Specifies the keep-alive period for an active network connection. +# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false. +# ## Defaults to the OS configuration. +# # tcp_keep_alive_period = "2h" +# +# ## Address and port to host UDP listener on +# service_address = ":8125" +# +# ## The following configuration options control when telegraf clears it's cache +# ## of previous values. If set to false, then telegraf will only clear it's +# ## cache when the daemon is restarted. 
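+# ## Illustrative example (assumption, not from the upstream sample): with
+# ## delete_gauges = false a gauge that stops receiving updates keeps reporting
+# ## its last cached value every interval; with the defaults below it is no
+# ## longer emitted after the next interval.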
+# ## Reset gauges every interval (default=true) +# delete_gauges = true +# ## Reset counters every interval (default=true) +# delete_counters = true +# ## Reset sets every interval (default=true) +# delete_sets = true +# ## Reset timings & histograms every interval (default=true) +# delete_timings = true +# +# ## Enable aggregation temporality adds temporality=delta or temporality=commulative tag, and +# ## start_time field, which adds the start time of the metric accumulation. +# ## You should use this when using OpenTelemetry output. +# # enable_aggregation_temporality = false +# +# ## Percentiles to calculate for timing & histogram stats. +# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] +# +# ## separator to use between elements of a statsd metric +# metric_separator = "_" +# +# ## Parses tags in the datadog statsd format +# ## http://docs.datadoghq.com/guides/dogstatsd/ +# ## deprecated in 1.10; use datadog_extensions option instead +# parse_data_dog_tags = false +# +# ## Parses extensions to statsd in the datadog statsd format +# ## currently supports metrics and datadog tags. +# ## http://docs.datadoghq.com/guides/dogstatsd/ +# datadog_extensions = false +# +# ## Parses distributions metric as specified in the datadog statsd format +# ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition +# datadog_distributions = false +# +# ## Statsd data translation templates, more info can be read here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md +# # templates = [ +# # "cpu.* measurement*" +# # ] +# +# ## Number of UDP messages allowed to queue up, once filled, +# ## the statsd server will start dropping packets +# allowed_pending_messages = 10000 +# +# ## Number of worker threads used to parse the incoming messages. +# # number_workers_threads = 5 +# +# ## Number of timing/histogram values to track per-measurement in the +# ## calculation of percentiles. Raising this limit increases the accuracy +# ## of percentiles but also increases the memory usage and cpu time. +# percentile_limit = 1000 +# +# ## Maximum socket buffer size in bytes, once the buffer fills up, metrics +# ## will start dropping. Defaults to the OS default. +# # read_buffer_size = 65535 +# +# ## Max duration (TTL) for each metric to stay cached/reported without being updated. +# # max_ttl = "10h" +# +# ## Sanitize name method +# ## By default, telegraf will pass names directly as they are received. +# ## However, upstream statsd now does sanitization of names which can be +# ## enabled by using the "upstream" method option. This option will a) replace +# ## white space with '_', replace '/' with '-', and remove charachters not +# ## matching 'a-zA-Z_\-0-9\.;='. +# #sanitize_name_method = "" + + +# # Suricata stats and alerts plugin +# [[inputs.suricata]] +# ## Source +# ## Data sink for Suricata stats log. This is expected to be a filename of a +# ## unix socket to be created for listening. +# # source = "/var/run/suricata-stats.sock" +# +# ## Delimiter +# ## Used for flattening field keys, e.g. subitem "alert" of "detect" becomes +# ## "detect_alert" when delimiter is "_". +# # delimiter = "_" +# +# ## Metric version +# ## Version 1 only collects stats and optionally will look for alerts if +# ## the configuration setting alerts is set to true. +# ## Version 2 parses any event type message by default and produced metrics +# ## under a single metric name using a tag to differentiate between event +# ## types. 
The timestamp for the message is applied to the generated metric. +# ## Additional tags and fields are included as well. +# # version = "1" +# +# ## Alerts +# ## In metric version 1, only status is captured by default, alerts must be +# ## turned on with this configuration option. This option does not apply for +# ## metric version 2. +# # alerts = false + + +# [[inputs.syslog]] +# ## Protocol, address and port to host the syslog receiver. +# ## If no host is specified, then localhost is used. +# ## If no port is specified, 6514 is used (RFC5425#section-4.1). +# ## ex: server = "tcp://localhost:6514" +# ## server = "udp://:6514" +# ## server = "unix:///var/run/telegraf-syslog.sock" +# ## When using tcp, consider using 'tcp4' or 'tcp6' to force the usage of IPv4 +# ## or IPV6 respectively. There are cases, where when not specified, a system +# ## may force an IPv4 mapped IPv6 address. +# server = "tcp://:6514" +# +# ## TLS Config +# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"] +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Period between keep alive probes. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# ## Only applies to stream sockets (e.g. TCP). +# # keep_alive_period = "5m" +# +# ## Maximum number of concurrent connections (default = 0). +# ## 0 means unlimited. +# ## Only applies to stream sockets (e.g. TCP). +# # max_connections = 1024 +# +# ## Read timeout is the maximum time allowed for reading a single message (default = 5s). +# ## 0 means unlimited. +# # read_timeout = "5s" +# +# ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). +# ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). +# ## Must be one of "octect-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## Whether to parse in best effort mode or not (default = false). +# ## By default best effort parsing is off. +# # best_effort = false +# +# ## The RFC standard to use for message parsing +# ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support) +# ## Must be one of "RFC5424", or "RFC3164". +# # syslog_standard = "RFC5424" +# +# ## Character to prepend to SD-PARAMs (default = "_"). +# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. +# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] +# ## For each combination a field is created. +# ## Its name is created concatenating identifier, sdparam_separator, and parameter name. +# # sdparam_separator = "_" + + +# # Parse the new lines appended to a file +# [[inputs.tail]] +# ## File names or a pattern to tail. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". 
ie: +# ## "/var/log/**.log" -> recursively find all .log files in /var/log +# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log +# ## "/var/log/apache.log" -> just tail the apache log file +# ## "/var/log/log[!1-2]* -> tail files without 1-2 +# ## "/var/log/log[^1-2]* -> identical behavior as above +# ## See https://github.com/gobwas/glob for more examples +# ## +# files = ["/var/mymetrics.out"] +# +# ## Read file from beginning. +# # from_beginning = false +# +# ## Whether file is a named pipe +# # pipe = false +# +# ## Method used to watch for file updates. Can be either "inotify" or "poll". +# # watch_method = "inotify" +# +# ## Maximum lines of the file to process that have not yet be written by the +# ## output. For best throughput set based on the number of metrics on each +# ## line and the size of the output's metric_batch_size. +# # max_undelivered_lines = 1000 +# +# ## Character encoding to use when interpreting the file contents. Invalid +# ## characters are replaced using the unicode replacement character. When set +# ## to the empty string the data is not decoded to text. +# ## ex: character_encoding = "utf-8" +# ## character_encoding = "utf-16le" +# ## character_encoding = "utf-16be" +# ## character_encoding = "" +# # character_encoding = "" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. +# # path_tag = "path" +# +# ## Filters to apply to files before generating metrics +# ## "ansi_color" removes ANSI colors +# # filters = [] +# +# ## multiline parser/codec +# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html +# #[inputs.tail.multiline] +# ## The pattern should be a regexp which matches what you believe to be an indicator that the field is part of an event consisting of multiple lines of log data. +# #pattern = "^\s" +# +# ## The field's value must be previous or next and indicates the relation to the +# ## multi-line event. +# #match_which_line = "previous" +# +# ## The invert_match can be true or false (defaults to false). +# ## If true, a message not matching the pattern will constitute a match of the multiline filter and the what will be applied. (vice-versa is also true) +# #invert_match = false +# +# ## The handling method for quoted text (defaults to 'ignore'). +# ## The following methods are available: +# ## ignore -- do not consider quotation (default) +# ## single-quotes -- consider text quoted by single quotes (') +# ## double-quotes -- consider text quoted by double quotes (") +# ## backticks -- consider text quoted by backticks (`) +# ## When handling quotes, escaped quotes (e.g. \") are handled correctly. +# #quotation = "ignore" +# +# ## The preserve_newline option can be true or false (defaults to false). +# ## If true, the newline character is preserved for multiline elements, +# ## this is useful to preserve message-structure e.g. for logging outputs. +# #preserve_newline = false +# +# #After the specified timeout, this plugin sends the multiline event even if no new pattern is found to start a new event. The default is 5s. +# #timeout = 5s + + +# ## DEPRECATED: The "tcp_listener" plugin is deprecated in version 1.3.0 and will be removed in 1.30.0, use 'inputs.socket_listener' instead. 
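+# ## Migration sketch (illustrative; the port number is assumed): a former
+# ## tcp_listener on port 8094 maps to
+# ##   [[inputs.socket_listener]]
+# ##   service_address = "tcp://:8094"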
+# # Generic TCP listener +# [[inputs.tcp_listener]] +# # socket_listener plugin +# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener + + +# ## DEPRECATED: The "udp_listener" plugin is deprecated in version 1.3.0 and will be removed in 1.30.0, use 'inputs.socket_listener' instead. +# # Generic UDP listener +# [[inputs.udp_listener]] +# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener + + +# # Read metrics from one or many vCenters +# [[inputs.vsphere]] +# ## List of vCenter URLs to be monitored. These three lines must be uncommented +# ## and edited for the plugin to work. +# vcenters = [ "https://vcenter.local/sdk" ] +# username = "user@corp.local" +# password = "secret" +# +# ## VMs +# ## Typical VM metrics (if omitted or empty, all metrics are collected) +# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected) +# # vm_exclude = [] # Inventory paths to exclude +# vm_metric_include = [ +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.run.summation", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.wait.summation", +# "mem.active.average", +# "mem.granted.average", +# "mem.latency.average", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.usage.average", +# "power.power.average", +# "virtualDisk.numberReadAveraged.average", +# "virtualDisk.numberWriteAveraged.average", +# "virtualDisk.read.average", +# "virtualDisk.readOIO.latest", +# "virtualDisk.throughput.usage.average", +# "virtualDisk.totalReadLatency.average", +# "virtualDisk.totalWriteLatency.average", +# "virtualDisk.write.average", +# "virtualDisk.writeOIO.latest", +# "sys.uptime.latest", +# ] +# # vm_metric_exclude = [] ## Nothing is excluded by default +# # vm_instances = true ## true by default +# +# ## Hosts +# ## Typical host metrics (if omitted or empty, all metrics are collected) +# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) +# # host_exclude [] # Inventory paths to exclude +# host_metric_include = [ +# "cpu.coreUtilization.average", +# "cpu.costop.summation", +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.swapwait.summation", +# "cpu.usage.average", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.utilization.average", +# "cpu.wait.summation", +# "disk.deviceReadLatency.average", +# "disk.deviceWriteLatency.average", +# "disk.kernelReadLatency.average", +# "disk.kernelWriteLatency.average", +# "disk.numberReadAveraged.average", +# "disk.numberWriteAveraged.average", +# "disk.read.average", +# "disk.totalReadLatency.average", +# "disk.totalWriteLatency.average", +# "disk.write.average", +# "mem.active.average", +# "mem.latency.average", +# "mem.state.latest", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.totalCapacity.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.errorsRx.summation", +# "net.errorsTx.summation", +# 
"net.usage.average", +# "power.power.average", +# "storageAdapter.numberReadAveraged.average", +# "storageAdapter.numberWriteAveraged.average", +# "storageAdapter.read.average", +# "storageAdapter.write.average", +# "sys.uptime.latest", +# ] +# ## Collect IP addresses? Valid values are "ipv4" and "ipv6" +# # ip_addresses = ["ipv6", "ipv4" ] +# +# # host_metric_exclude = [] ## Nothing excluded by default +# # host_instances = true ## true by default +# +# +# ## Clusters +# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # cluster_exclude = [] # Inventory paths to exclude +# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected +# # cluster_metric_exclude = [] ## Nothing excluded by default +# # cluster_instances = false ## false by default +# +# ## Resource Pools +# # resource_pool_include = [ "/*/host/**"] # Inventory path to resource pools to collect (by default all are collected) +# # resource_pool_exclude = [] # Inventory paths to exclude +# # resource_pool_metric_include = [] ## if omitted or empty, all metrics are collected +# # resource_pool_metric_exclude = [] ## Nothing excluded by default +# # resource_pool_instances = false ## false by default +# +# ## Datastores +# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) +# # datastore_exclude = [] # Inventory paths to exclude +# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected +# # datastore_metric_exclude = [] ## Nothing excluded by default +# # datastore_instances = false ## false by default +# +# ## Datacenters +# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # datacenter_exclude = [] # Inventory paths to exclude +# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected +# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. +# # datacenter_instances = false ## false by default +# +# ## VSAN +# # vsan_metric_include = [] ## if omitted or empty, all metrics are collected +# # vsan_metric_exclude = [ "*" ] ## vSAN are not collected by default. +# ## Whether to skip verifying vSAN metrics against the ones from GetSupportedEntityTypes API. +# # vsan_metric_skip_verify = false ## false by default. +# +# ## Interval for sampling vSAN performance metrics, can be reduced down to +# ## 30 seconds for vSAN 8 U1. +# # vsan_interval = "5m" +# +# ## Plugin Settings +# ## separator character to use for measurement and field names (default: "_") +# # separator = "_" +# +# ## number of objects to retrieve per query for realtime resources (vms and hosts) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_objects = 256 +# +# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_metrics = 256 +# +# ## number of go routines to use for collection and discovery of objects and metrics +# # collect_concurrency = 1 +# # discover_concurrency = 1 +# +# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) +# # object_discovery_interval = "300s" +# +# ## timeout applies to any of the api request made to vcenter +# # timeout = "60s" +# +# ## When set to true, all samples are sent as integers. This makes the output +# ## data types backwards compatible with Telegraf 1.9 or lower. 
Normally all +# ## samples from vCenter, with the exception of percentages, are integer +# ## values, but under some conditions, some averaging takes place internally in +# ## the plugin. Setting this flag to "false" will send values as floats to +# ## preserve the full precision when averaging takes place. +# # use_int_samples = true +# +# ## Custom attributes from vCenter can be very useful for queries in order to slice the +# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled +# ## by default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. +# ## By default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. +# # custom_attribute_include = [] +# # custom_attribute_exclude = ["*"] +# +# ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In +# ## some versions (6.7, 7.0 and possible more), certain metrics, such as cluster metrics, may be reported +# ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing +# ## it too much may cause performance issues. +# # metric_lookback = 3 +# +# ## Optional SSL Config +# # ssl_ca = "/path/to/cafile" +# # ssl_cert = "/path/to/certfile" +# # ssl_key = "/path/to/keyfile" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## The Historical Interval value must match EXACTLY the interval in the daily +# # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals +# # historical_interval = "5m" +# +# ## Specifies plugin behavior regarding disconnected servers +# ## Available choices : +# ## - error: telegraf will return an error on startup if one the servers is unreachable +# ## - ignore: telegraf will ignore unreachable servers on both startup and gather +# # disconnected_servers_behavior = "error" +# +# ## HTTP Proxy support +# # use_system_proxy = true +# # http_proxy_url = "" + + +# # A Webhooks Event collector +# [[inputs.webhooks]] +# ## Address and port to host Webhook listener on +# service_address = ":1619" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# [inputs.webhooks.filestack] +# path = "/filestack" +# +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.github] +# path = "/github" +# # secret = "" +# +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.mandrill] +# path = "/mandrill" +# +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.rollbar] +# path = "/rollbar" +# +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.papertrail] +# path = "/papertrail" +# +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.particle] +# path = "/particle" +# +# ## HTTP basic auth +# #username = "" +# #password = "" +# +# [inputs.webhooks.artifactory] +# path = "/artifactory" + + +# # Input plugin to collect Windows Event Log messages +# # This plugin ONLY supports Windows +# [[inputs.win_eventlog]] 
+# ## Telegraf should have Administrator permissions to subscribe for some +# ## Windows Events channels (e.g. System log) +# +# ## LCID (Locale ID) for event rendering +# ## 1033 to force English language +# ## 0 to use default Windows locale +# # locale = 0 +# +# ## Name of eventlog, used only if xpath_query is empty +# ## Example: "Application" +# # eventlog_name = "" +# +# ## xpath_query can be in defined short form like "Event/System[EventID=999]" +# ## or you can form a XML Query. Refer to the Consuming Events article: +# ## https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events +# ## XML query is the recommended form, because it is most flexible +# ## You can create or debug XML Query by creating Custom View in Windows Event Viewer +# ## and then copying resulting XML here +# xpath_query = ''' +# +# +# +# *[System[( (EventID >= 5152 and EventID <= 5158) or EventID=5379 or EventID=4672)]] +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# ''' +# +# ## When true, event logs are read from the beginning; otherwise only future +# ## events will be logged. +# # from_beginning = false +# +# # Process UserData XML to fields, if this node exists in Event XML +# # process_userdata = true +# +# # Process EventData XML to fields, if this node exists in Event XML +# # process_eventdata = true +# +# ## Separator character to use for unrolled XML Data field names +# # separator = "_" +# +# ## Get only first line of Message field. For most events first line is +# ## usually more than enough +# # only_first_line_of_message = true +# +# ## Parse timestamp from TimeCreated.SystemTime event field. +# ## Will default to current time of telegraf processing on parsing error or if +# ## set to false +# # timestamp_from_event = true +# +# ## System field names: +# ## "Source", "EventID", "Version", "Level", "Task", "Opcode", "Keywords", +# ## "TimeCreated", "EventRecordID", "ActivityID", "RelatedActivityID", +# ## "ProcessID", "ThreadID", "ProcessName", "Channel", "Computer", "UserID", +# ## "UserName", "Message", "LevelText", "TaskText", "OpcodeText" +# ## +# ## In addition to System, Data fields can be unrolled from additional XML +# ## nodes in event. Human-readable representation of those nodes is formatted +# ## into event Message field, but XML is more machine-parsable +# +# ## Event fields to include as tags +# ## The values below are included by default. +# ## Globbing supported (e.g. "Level*" matches both "Level" and "LevelText") +# # event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] +# +# ## Event fields to include +# ## All fields are sent by default. +# ## Globbing supported (e.g. "Level*" matches both "Level" and "LevelText") +# # event_fields = ["*"] +# +# ## Event fields to exclude +# ## Note that if you exclude all fields then no metrics are produced. A valid +# ## metric includes at least one field. +# ## Globbing supported (e.g. "Level*" matches both "Level" and "LevelText") +# # exclude_fields = [] +# +# ## Event fields to exclude if their value is empty or equals to zero +# ## The values below are included by default. +# ## Globbing supported (e.g. "Level*" matches both "Level" and "LevelText") +# # exclude_empty = ["Task", "Opcode", "*ActivityID", "UserID"] + + +# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures. 
+# [[inputs.zipkin]] +# ## URL path for span data +# # path = "/api/v1/spans" +# +# ## Port on which Telegraf listens +# # port = 9411 +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response +# # write_timeout = "10s" + diff --git a/agent/internal/service/component/telegraf.go b/agent/internal/service/component/telegraf.go new file mode 100644 index 0000000..9f950bd --- /dev/null +++ b/agent/internal/service/component/telegraf.go @@ -0,0 +1,68 @@ +package component + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/boot" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/bin_path" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + process_manager "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils" + "io/fs" + "os" + "path/filepath" + "time" +) + +var ( + telegrafConfFile = filepath.Join(bin_path.BINDIR, "/data-collector/telegraf/conf/", "telegraf.conf") + bakFile = filepath.Join(bin_path.BINDIR, "/data-collector/telegraf/conf/", "telegraf.conf.bak") +) + +func UpdateTelegrafConfig(configText []byte) error { + // 1.先备份 + oldConfig, err := os.ReadFile(telegrafConfFile) + err = os.WriteFile(bakFile, oldConfig, fs.ModePerm) + if err != nil { + panic("备份文件失败:" + err.Error()) + return err + } + + // 2.后替换 + err = os.WriteFile(telegrafConfFile, configText, fs.ModePerm) + + if err != nil { + panic("替换文件失败:" + err.Error()) + go rollback(false, err) + return err + } + + // 3.再启动,让minit重启 + utils.StopCh <- os.Interrupt + time.Sleep(time.Second * 5) + go process_manager.Start(boot.ProcessUnitDir) + + return err +} + +func RecoverConfig() { + config, _ := os.ReadFile(bakFile) + os.WriteFile(telegrafConfFile, config, fs.ModePerm) + + utils.StopCh <- os.Interrupt + time.Sleep(time.Second * 5) + + go process_manager.Start(boot.ProcessUnitDir) +} + +func rollback(needRestart bool, err error) { + + log.Infof("Telegraf配置更新失败: %s", err) + config, _ := os.ReadFile(bakFile) + os.WriteFile(telegrafConfFile, config, fs.ModePerm) + if needRestart { + utils.StopCh <- os.Interrupt + time.Sleep(time.Second * 5) + + go process_manager.Start(boot.ProcessUnitDir) + } + +} diff --git a/agent/internal/service/heartbeat_service/heartbeat.go b/agent/internal/service/heartbeat_service/heartbeat.go new file mode 100644 index 0000000..c999aa4 --- /dev/null +++ b/agent/internal/service/heartbeat_service/heartbeat.go @@ -0,0 +1,102 @@ +package heartbeat_service + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/agent" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/model/heartbeat" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/service/component" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/providers/shared" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" + "strings" + "time" +) + +var ( + agentConfig = agent.Agent + commonConfig = config.Config + + AgentState = heartbeat.HeartBeat{ + AgentVersion: "xxx", + IPAddress: "xxx", + LastStarttime: time.Now(), + MACAddress: "xxx", + SerialNo: "xxx", + } +) + +var ( + host types.Host + ip, mac string +) + +func init() { + host, _ = sysinfo.Host() + ip, mac, _ = shared.NamedNetwork(agentConfig.NetInterface) + + RefreshHeartBeatInfo() +} + +// RefreshHeartBeatInfo +// 更新心跳信息中的组件信息, 
检查Telegraf状态 +func RefreshHeartBeatInfo() { + // 每 15 秒钟时执行一次 + ticker := time.NewTicker(15 * time.Second) + go func() { + for { + select { + case <-ticker.C: + // 读取状态 更新状态 + //0:离线,1:在线,2:故障 + AgentState.IPAddress = ip + AgentState.MACAddress = mac + AgentState.SerialNo = agentConfig.RID + AgentState.AgentVersion = commonConfig.AgentVersion + AgentState.LastStarttime = host.Info().BootTime + + AgentState.Architecture = host.Info().Architecture + AgentState.KernelVersion = host.Info().KernelVersion + AgentState.OS = host.Info().OS.Type + " " + host.Info().OS.Platform + " " + host.Info().OS.Version + AgentState.OSType = strings.ToLower(host.Info().OS.Type) + + telegraf, err := component.GetTelegrafProcessInfo() + if err != nil { + AgentState.COMInfo = []heartbeat.COMInfo{ + { + Name: "telegraf", + LastStarttime: time.Now(), + Status: "2", + }, + } + } else { + AgentState.COMInfo = []heartbeat.COMInfo{ + { + Name: "telegraf", + LastStarttime: telegraf.StartTime, + Status: "1", + }, + } + } + + processExporter, err := component.GetProcessExporterProcessInfo() + if err != nil { + AgentState.COMInfo = []heartbeat.COMInfo{ + { + Name: "process-exporter", + LastStarttime: time.Now(), + Status: "2", + }, + } + } else { + AgentState.COMInfo = []heartbeat.COMInfo{ + { + Name: "process-exporter", + LastStarttime: processExporter.StartTime, + Status: "1", + }, + } + } + } + } + }() +} diff --git a/agent/internal/service/load_resource_info/load_resource_info.go b/agent/internal/service/load_resource_info/load_resource_info.go new file mode 100644 index 0000000..7847b08 --- /dev/null +++ b/agent/internal/service/load_resource_info/load_resource_info.go @@ -0,0 +1,11 @@ +package load_resource_info + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/agent" +) + +// 初次加载配置,如果有更新则在 /opt/agent目录 +// 配置优先级 /opt/agent/resource.yaml > ./config.yaml +func LoadAgentInfo(config agent.AgentConfig) { + agent.LoadAgentInfo(config) +} diff --git a/agent/internal/service/log_service/log_service.go b/agent/internal/service/log_service/log_service.go new file mode 100644 index 0000000..404b722 --- /dev/null +++ b/agent/internal/service/log_service/log_service.go @@ -0,0 +1,67 @@ +package log_service + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config/bin_path" + "io" + "os" +) + +func GetAgentLog(lineNum uint64) (logTexts []string, err error) { + filename := bin_path.AGENTLOG + logTexts, err = reverseRead(filename, lineNum) + return +} + +func GetAgentUpgradeLog(lineNum uint64) (logTexts []string, err error) { + filename := bin_path.AGENTUPGRADELOG + logTexts, err = reverseRead(filename, lineNum) + return +} + +func GetTelegrafLog(lineNum uint64) (logTexts []string, err error) { + filename := bin_path.TELEGRAFLOG + logTexts, err = reverseRead(filename, lineNum) + return +} + +func reverseRead(name string, lineNum uint64) ([]string, error) { + //打开文件 + file, err := os.Open(name) + if err != nil { + return nil, err + } + defer file.Close() + //获取文件大小 + fs, err := file.Stat() + if err != nil { + return nil, err + } + fileSize := fs.Size() + + var offset int64 = -1 //偏移量,初始化为-1,若为0则会读到EOF + char := make([]byte, 1) //用于读取单个字节 + lineStr := "" //存放一行的数据 + buff := make([]string, 0, 100) + for (-offset) <= fileSize { + //通过Seek函数从末尾移动游标然后每次读取一个字节 + file.Seek(offset, io.SeekEnd) + _, err := file.Read(char) + if err != nil { + return buff, err + } + if char[0] == '\n' { + offset-- //windows跳过'\r' + lineNum-- //到此读取完一行 + buff = append(buff, lineStr) + lineStr = "" + if lineNum == 0 { + return buff, 
nil + } + } else { + lineStr = string(char) + lineStr + } + offset-- + } + buff = append(buff, lineStr) + return buff, nil +} diff --git a/agent/internal/service/log_service/log_service_test.go b/agent/internal/service/log_service/log_service_test.go new file mode 100644 index 0000000..16242f7 --- /dev/null +++ b/agent/internal/service/log_service/log_service_test.go @@ -0,0 +1,9 @@ +package log_service + +import ( + "testing" +) + +func Test_reverseRead(t *testing.T) { + reverseRead("./testdata/1.log", 100) +} diff --git a/agent/internal/service/log_service/testdata/1.log b/agent/internal/service/log_service/testdata/1.log new file mode 100644 index 0000000..44a5b6a --- /dev/null +++ b/agent/internal/service/log_service/testdata/1.log @@ -0,0 +1,110 @@ +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1aaaa +2aaaaa +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 \ No newline at end of file diff --git a/agent/internal/validator/validator.go b/agent/internal/validator/validator.go new file mode 100644 index 0000000..25c7746 --- /dev/null +++ b/agent/internal/validator/validator.go @@ -0,0 +1,115 @@ +package validator + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/errors" + r "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/pkg/response" + "github.com/gin-gonic/gin" + "github.com/gin-gonic/gin/binding" + "github.com/go-playground/locales/en" + "github.com/go-playground/locales/zh" + ut "github.com/go-playground/universal-translator" + "github.com/go-playground/validator/v10" + enTranslations "github.com/go-playground/validator/v10/translations/en" + zhTranslations "github.com/go-playground/validator/v10/translations/zh" + "reflect" + "strings" + "sync" +) + +type Page struct { + Page float64 `form:"page" json:"page" binding:"min=1"` // 必填,页面值>=1 + Limit float64 `form:"limit" json:"limit" binding:"min=1"` // 必填,每页条数值>=1 +} + +var trans ut.Translator // 全局验证器 + +var once sync.Once + +func InitValidatorTrans(locale string) { + once.Do(func() { validatorTrans(locale) }) +} + +func validatorTrans(locale string) { + var v *validator.Validate + var ok bool + if v, ok = binding.Validator.Engine().(*validator.Validate); !ok { + return + } + // 注册一个获取json tag的自定义方法 + v.RegisterTagNameFunc(func(field reflect.StructField) string { + label := field.Tag.Get("label") + if label == "" { + label = field.Tag.Get("json") + if label == "" { + label = field.Tag.Get("form") + } + } + + if label == "-" { + return "" + } + if label == "" { + return field.Name + } + return label + }) + + zhT := zh.New() // 中文翻译器 + enT := en.New() // 英文翻译器 + uni := ut.New(enT, zhT, enT) + + // locale 通常取决于 http 请求头的 'Accept-Language' + trans, ok = uni.GetTranslator(locale) + if !ok { + panic("Initialize a language not supported by the validator") + } + var err error + // 注册翻译器 + switch locale { + case "en": + err = enTranslations.RegisterDefaultTranslations(v, trans) + case "zh": + err = zhTranslations.RegisterDefaultTranslations(v, trans) + default: + err = enTranslations.RegisterDefaultTranslations(v, trans) + } + if err != nil { + panic("Failed to register translator when initializing validator") + } +} + +func ResponseError(c *gin.Context, err error) { + if errs, ok := err.(validator.ValidationErrors); ok { + fields := errs.Translate(trans) + for _, err := 
range fields { + r.Resp().FailCode(c, errors.InvalidParameter, err) + break + } + } else { + errStr := err.Error() + // multipart:nextpart:eof 错误表示验证器需要一些参数,但是调用者没有提交任何参数 + if strings.ReplaceAll(strings.ToLower(errStr), " ", "") == "multipart:nextpart:eof" { + r.Resp().FailCode(c, errors.InvalidParameter, "请根据要求填写必填项参数") + } else { + r.Resp().FailCode(c, errors.InvalidParameter, errStr) + } + } +} + +func CheckQueryParams(c *gin.Context, obj interface{}) error { + if err := c.ShouldBindQuery(obj); err != nil { + ResponseError(c, err) + return err + } + + return nil +} + +func CheckPostParams(c *gin.Context, obj interface{}) error { + if err := c.ShouldBind(obj); err != nil { + ResponseError(c, err) + return err + } + + return nil +} diff --git a/agent/main.go b/agent/main.go new file mode 100644 index 0000000..c4b6c2a --- /dev/null +++ b/agent/main.go @@ -0,0 +1,68 @@ +package main + +import ( + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/boot" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/command" + _ "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/routers" + _ "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/service/heartbeat_service" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils" + "os" + "os/signal" + "strings" + "syscall" + "time" +) + +func main() { + run() +} + +func run() { + script := strings.Split(boot.Run, ":") + switch script[0] { + case "http": + + go func() { + r := routers.SetRouters() + err := r.Run(fmt.Sprintf("%s:%d", config.Config.Server.Host, config.Config.Server.Port)) + if err != nil { + panic(err) + } + }() + + // wait for signals + chSysSig := make(chan os.Signal, 1) + signal.Notify(chSysSig, syscall.SIGINT, syscall.SIGTERM) + + select { + case <-chSysSig: + // 用户发送INTR字符(Ctrl+C)触发 + log.Infof("用户发送INTR字符(Ctrl+C)触发中止逻辑") + + break + case <-utils.ChUserSig: + // 通过接口 + log.Infof("用户通过中止服务接口触发中止逻辑") + break + } + //sig := <-chSysSig + utils.StopCh <- os.Interrupt + + // delay 3 seconds + log.Infof("优雅关闭中.....") + time.Sleep(time.Second * 5) + log.Infof("已完成关闭!") + + case "command": + if len(script) != 2 { + panic("命令错误,缺少重要参数") + } + command.Run(script[1]) + default: + panic("执行脚本错误") + } +} diff --git a/agent/pkg/convert/convert.go b/agent/pkg/convert/convert.go new file mode 100644 index 0000000..72e6243 --- /dev/null +++ b/agent/pkg/convert/convert.go @@ -0,0 +1,56 @@ +package convert + +import "time" + +func GetString(val interface{}) (s string) { + s, _ = val.(string) + return +} + +// GetBool returns the value associated with the key as a boolean. +func GetBool(val interface{}) (b bool) { + b, _ = val.(bool) + return +} + +// GetInt returns the value associated with the key as an integer. +func GetInt(val interface{}) (i int) { + i, _ = val.(int) + return +} + +// GetInt64 returns the value associated with the key as an integer. +func GetInt64(val interface{}) (i64 int64) { + i64, _ = val.(int64) + return +} + +// GetUint returns the value associated with the key as an unsigned integer. +func GetUint(val interface{}) (ui uint) { + ui, _ = val.(uint) + return +} + +// GetUint64 returns the value associated with the key as an unsigned integer. +func GetUint64(val interface{}) (ui64 uint64) { + ui64, _ = val.(uint64) + return +} + +// GetFloat64 returns the value associated with the key as a float64. 
+func GetFloat64(val interface{}) (f64 float64) { + f64, _ = val.(float64) + return +} + +// GetTime returns the value associated with the key as time. +func GetTime(val interface{}) (t time.Time) { + t, _ = val.(time.Time) + return +} + +// GetDuration returns the value associated with the key as a duration. +func GetDuration(val interface{}) (d time.Duration) { + d, _ = val.(time.Duration) + return +} diff --git a/agent/pkg/file-download-client/download.go b/agent/pkg/file-download-client/download.go new file mode 100644 index 0000000..a781afe --- /dev/null +++ b/agent/pkg/file-download-client/download.go @@ -0,0 +1,42 @@ +package file_download_client + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils/httputils" + "net/http" + "os" + "time" +) + +func DownloadToLocal(fileUrl, objectName, localUpgradeCompressedFilePath string, retry int) error { + var fileBytes []byte + if retry <= 0 { + retry = 3 + } + + for retry > 0 { + object, statusCode, _, err := httputils.HttpGet(fileUrl, nil, 600*time.Second, map[string]string{}, []*http.Cookie{}) + if err != nil || statusCode >= 400 { + log.Errorf("下载安装包失败: %s", objectName) + return err + } else { + fileBytes = object + break + } + retry-- + } + + log.Infof("获取安装包成功: %s", objectName) + localFile, err := os.Create(localUpgradeCompressedFilePath) + if err != nil { + log.Errorf("创建本地安装文件失败: %s", localUpgradeCompressedFilePath) + return err + } + defer localFile.Close() + + if _, err := localFile.Write(fileBytes); err != nil { + log.Errorf("写入本地安装文件失败: %s", localUpgradeCompressedFilePath) + return err + } + return nil +} diff --git a/agent/pkg/file-download-client/download_test.go b/agent/pkg/file-download-client/download_test.go new file mode 100644 index 0000000..a3a9f5b --- /dev/null +++ b/agent/pkg/file-download-client/download_test.go @@ -0,0 +1,15 @@ +package file_download_client + +import ( + "net/http" + "testing" + "time" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils/httputils" +) + +func TestDownloadToLocal(t *testing.T) { + url := "http://127.0.0.1:2134/cpn/agentv1.1.0.tar.gz?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=dbCHCvaFG53JWtWzSIiK%2F20231108%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20231108T024819Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&response-content-disposition=attachment%3B%20filename%3Dagentv1.1.0.tar.gz&X-Amz-Signature=2ce8303cbca3f6c07de60870a4cd8626f69c31bf0373d364d4de57a67f322b7c" + httputils.HttpGet(url, nil, 300*time.Second, map[string]string{}, []*http.Cookie{}) + +} diff --git a/agent/pkg/go-sysinfo/LICENSE.txt b/agent/pkg/go-sysinfo/LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/agent/pkg/go-sysinfo/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
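The files under `agent/pkg/go-sysinfo/` introduced below are a vendored copy of Elastic's go-sysinfo, with import paths rewritten to `git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo`. As a rough, non-authoritative sketch of how the agent could query host information through this vendored tree, assuming it also carries the upstream root package exposing `Host()` as in upstream go-sysinfo (that entry point is not shown in this diff):

```go
package main

import (
	"encoding/json"
	"fmt"

	// Assumed vendored root package; only internal/registry and providers appear in this diff.
	sysinfo "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo"
	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types"
)

func main() {
	// Host() resolves to whichever provider registered itself via
	// internal/registry for the current GOOS (aix, darwin, linux, windows).
	host, err := sysinfo.Host()
	if err != nil {
		panic(err)
	}

	// Basic host facts: hostname, OS, kernel version, IPs, MACs, boot time.
	info := host.Info()
	out, _ := json.MarshalIndent(info, "", "  ")
	fmt.Println(string(out))

	// Optional capabilities are discovered with type assertions,
	// mirroring the pattern shown in the vendored README.
	if la, ok := host.(types.LoadAverage); ok {
		if avg, err := la.LoadAverage(); err == nil {
			fmt.Printf("load: %.2f %.2f %.2f\n", avg.One, avg.Five, avg.Fifteen)
		}
	}
}
```

The type-assertion step reflects the provider pattern described in the vendored README: only the capabilities implemented for the current GOOS are exposed at runtime.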
diff --git a/agent/pkg/go-sysinfo/NOTICE.txt b/agent/pkg/go-sysinfo/NOTICE.txt new file mode 100644 index 0000000..ac43539 --- /dev/null +++ b/agent/pkg/go-sysinfo/NOTICE.txt @@ -0,0 +1,5 @@ +Elastic go-sysinfo +Copyright 2017-2022 Elasticsearch B.V. + +This product includes software developed at +Elasticsearch, B.V. (https://www.elastic.co/). diff --git a/agent/pkg/go-sysinfo/README.md b/agent/pkg/go-sysinfo/README.md new file mode 100644 index 0000000..c0f35aa --- /dev/null +++ b/agent/pkg/go-sysinfo/README.md @@ -0,0 +1,81 @@ +# go-sysinfo + +[![go](https://github.com/elastic/go-sysinfo/actions/workflows/go.yml/badge.svg)](https://github.com/elastic/go-sysinfo/actions/workflows/go.yml) +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[godocs]: http://godoc.org/github.com/elastic/go-sysinfo + +go-sysinfo is a library for collecting system information. This includes +information about the host machine and processes running on the host. + +The available features vary based on what has been implemented by the "provider" +for the operating system. At runtime you check to see if additional interfaces +are implemented by the returned `Host` or `Process`. For example: + +```go +process, err := sysinfo.Self() +if err != nil { + return err +} + +if handleCounter, ok := process.(types.OpenHandleCounter); ok { + count, err := handleCounter.OpenHandleCount() + if err != nil { + return err + } + log.Printf("%d open handles", count) +} +``` + +These tables show what methods are implemented as well as the extra interfaces +that are implemented. + +| `Host` Features | Darwin | Linux | Windows | AIX | +|------------------|--------|-------|---------|-----| +| `Info()` | x | x | x | x | +| `Memory()` | x | x | x | x | +| `CPUTimer` | x | x | x | x | +| `LoadAverage` | x | x | | | +| `VMStat` | | x | | | +| `NetworkCounters`| | x | | | + +| `Process` Features | Darwin | Linux | Windows | AIX | +|------------------------|--------|-------|---------|-----| +| `Info()` | x | x | x | x | +| `Memory()` | x | x | x | x | +| `User()` | x | x | x | x | +| `Parent()` | x | x | x | x | +| `CPUTimer` | x | x | x | x | +| `Environment` | x | x | | x | +| `OpenHandleEnumerator` | | x | | | +| `OpenHandleCounter` | | x | | | +| `Seccomp` | | x | | | +| `Capabilities` | | x | | | +| `NetworkCounters` | | x | | | + +### GOOS / GOARCH Pairs + +This table lists the OS and architectures for which a "provider" is implemented. + +| GOOS / GOARCH | Requires CGO | Tested | +|----------------|--------------|--------| +| aix/ppc64 | x | | +| darwin/amd64 | optional * | x | +| darwin/arm64 | optional * | x | +| linux/386 | | | +| linux/amd64 | | x | +| linux/arm | | | +| linux/arm64 | | | +| linux/mips | | | +| linux/mips64 | | | +| linux/mips64le | | | +| linux/mipsle | | | +| linux/ppc64 | | | +| linux/ppc64le | | | +| linux/riscv64 | | | +| linux/s390x | | | +| windows/amd64 | | x | +| windows/arm64 | | | +| windows/arm | | | + +* On darwin (macOS) host information like machineid and process information like memory, cpu, user and starttime require cgo. diff --git a/agent/pkg/go-sysinfo/internal/registry/registry.go b/agent/pkg/go-sysinfo/internal/registry/registry.go new file mode 100644 index 0000000..4b3b960 --- /dev/null +++ b/agent/pkg/go-sysinfo/internal/registry/registry.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package registry + +import ( + "fmt" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +var ( + hostProvider HostProvider + processProvider ProcessProvider +) + +type HostProvider interface { + Host() (types.Host, error) +} + +type ProcessProvider interface { + Processes() ([]types.Process, error) + Process(pid int) (types.Process, error) + Self() (types.Process, error) +} + +func Register(provider interface{}) { + if h, ok := provider.(HostProvider); ok { + if hostProvider != nil { + panic(fmt.Sprintf("HostProvider already registered: %v", hostProvider)) + } + hostProvider = h + } + + if p, ok := provider.(ProcessProvider); ok { + if processProvider != nil { + panic(fmt.Sprintf("ProcessProvider already registered: %v", processProvider)) + } + processProvider = p + } +} + +func GetHostProvider() HostProvider { return hostProvider } +func GetProcessProvider() ProcessProvider { return processProvider } diff --git a/agent/pkg/go-sysinfo/providers/aix/boottime_aix_ppc64.go b/agent/pkg/go-sysinfo/providers/aix/boottime_aix_ppc64.go new file mode 100644 index 0000000..e158f46 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/aix/boottime_aix_ppc64.go @@ -0,0 +1,78 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build aix && ppc64 + +package aix + +import ( + "encoding/binary" + "fmt" + "os" + "time" +) + +// utmp can't be used by "encoding/binary" if generated by cgo, +// some pads will be missing. 
+type utmp struct { + User [256]uint8 + Id [14]uint8 + Line [64]uint8 + XPad1 int16 + Pid int32 + Type int16 + XPad2 int16 + Time int64 + Termination int16 + Exit int16 + Host [256]uint8 + Xdblwordpad int32 + XreservedA [2]int32 + XreservedV [6]int32 +} + +const ( + typeBootTime = 2 +) + +// BootTime returns the time at which the machine was started, truncated to the nearest second +func BootTime() (time.Time, error) { + return bootTime("/etc/utmp") +} + +func bootTime(filename string) (time.Time, error) { + // Get boot time from /etc/utmp + file, err := os.Open(filename) + if err != nil { + return time.Time{}, fmt.Errorf("failed to get host uptime: cannot open /etc/utmp: %w", err) + } + + defer file.Close() + + for { + var utmp utmp + if err := binary.Read(file, binary.BigEndian, &utmp); err != nil { + break + } + + if utmp.Type == typeBootTime { + return time.Unix(utmp.Time, 0), nil + } + } + + return time.Time{}, fmt.Errorf("failed to get host uptime: no utmp record: %w", err) +} diff --git a/agent/pkg/go-sysinfo/providers/aix/boottime_aix_ppc64_test.go b/agent/pkg/go-sysinfo/providers/aix/boottime_aix_ppc64_test.go new file mode 100644 index 0000000..101a722 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/aix/boottime_aix_ppc64_test.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build aix && ppc64 + +package aix + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBootTime(t *testing.T) { + bt, err := bootTime("testdata/utmp") + if err != nil { + t.Fatal(err) + } + + assert.EqualValues(t, bt.Unix(), 1585726535) +} diff --git a/agent/pkg/go-sysinfo/providers/aix/defs_aix.go b/agent/pkg/go-sysinfo/providers/aix/defs_aix.go new file mode 100644 index 0000000..423e351 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/aix/defs_aix.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build ignore + +package aix + +/* +#include +#include +#include +*/ +import "C" + +type prcred C.prcred_t + +type ( + pstatus C.pstatus_t + prTimestruc64 C.pr_timestruc64_t + prSigset C.pr_sigset_t + fltset C.fltset_t + lwpstatus C.lwpstatus_t + prSiginfo64 C.pr_siginfo64_t + prStack64 C.pr_stack64_t + prSigaction64 C.struct_pr_sigaction64 + prgregset C.prgregset_t + prfpregset C.prfpregset_t + pfamily C.pfamily_t +) diff --git a/agent/pkg/go-sysinfo/providers/aix/doc.go b/agent/pkg/go-sysinfo/providers/aix/doc.go new file mode 100644 index 0000000..aadec23 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/aix/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package aix implements the HostProvider and ProcessProvider interfaces +// for providing information about IBM AIX on ppc64. +package aix diff --git a/agent/pkg/go-sysinfo/providers/aix/host_aix_ppc64.go b/agent/pkg/go-sysinfo/providers/aix/host_aix_ppc64.go new file mode 100644 index 0000000..e8f5eaa --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/aix/host_aix_ppc64.go @@ -0,0 +1,229 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build aix && ppc64 && cgo + +package aix + +/* +#cgo LDFLAGS: -L/usr/lib -lperfstat + +#include +#include +#include + +*/ +import "C" + +import ( + "errors" + "fmt" + "os" + "strings" + "time" + + "github.com/joeshaw/multierror" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/internal/registry" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/providers/shared" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +//go:generate sh -c "go tool cgo -godefs defs_aix.go | sed 's/*byte/uint64/g' > ztypes_aix_ppc64.go" +// As cgo will return some psinfo's fields with *byte, binary.Read will refuse this type. + +func init() { + registry.Register(aixSystem{}) +} + +type aixSystem struct{} + +// Host returns a new AIX host. 
+func (aixSystem) Host() (types.Host, error) { + return newHost() +} + +type host struct { + info types.HostInfo +} + +// Architecture returns the architecture of the host +func Architecture() (string, error) { + return "ppc", nil +} + +// Info returns the host details. +func (h *host) Info() types.HostInfo { + return h.info +} + +// Info returns the current CPU usage of the host. +func (*host) CPUTime() (types.CPUTimes, error) { + clock := uint64(C.sysconf(C._SC_CLK_TCK)) + tick2nsec := func(val uint64) uint64 { + return val * 1e9 / clock + } + + cpudata := C.perfstat_cpu_total_t{} + + if _, err := C.perfstat_cpu_total(nil, &cpudata, C.sizeof_perfstat_cpu_total_t, 1); err != nil { + return types.CPUTimes{}, fmt.Errorf("error while callin perfstat_cpu_total: %w", err) + } + + return types.CPUTimes{ + User: time.Duration(tick2nsec(uint64(cpudata.user))), + System: time.Duration(tick2nsec(uint64(cpudata.sys))), + Idle: time.Duration(tick2nsec(uint64(cpudata.idle))), + IOWait: time.Duration(tick2nsec(uint64(cpudata.wait))), + }, nil +} + +// Memory returns the current memory usage of the host. +func (*host) Memory() (*types.HostMemoryInfo, error) { + var mem types.HostMemoryInfo + + pagesize := uint64(os.Getpagesize()) + + meminfo := C.perfstat_memory_total_t{} + _, err := C.perfstat_memory_total(nil, &meminfo, C.sizeof_perfstat_memory_total_t, 1) + if err != nil { + return nil, fmt.Errorf("perfstat_memory_total failed: %w", err) + } + + mem.Total = uint64(meminfo.real_total) * pagesize + mem.Free = uint64(meminfo.real_free) * pagesize + mem.Used = uint64(meminfo.real_inuse) * pagesize + + // There is no real equivalent to memory available in AIX. + mem.Available = mem.Free + + mem.VirtualTotal = uint64(meminfo.virt_total) * pagesize + mem.VirtualFree = mem.Free + uint64(meminfo.pgsp_free)*pagesize + mem.VirtualUsed = mem.VirtualTotal - mem.VirtualFree + + mem.Metrics = map[string]uint64{ + "bytes_coalesced": uint64(meminfo.bytes_coalesced), + "bytes_coalesced_mempool": uint64(meminfo.bytes_coalesced_mempool), + "real_pinned": uint64(meminfo.real_pinned) * pagesize, + "pgins": uint64(meminfo.pgins), + "pgouts": uint64(meminfo.pgouts), + "pgsp_free": uint64(meminfo.pgsp_free) * pagesize, + "pgsp_rsvd": uint64(meminfo.pgsp_rsvd) * pagesize, + } + + return &mem, nil +} + +func (h *host) FQDN() (string, error) { + return shared.FQDN() +} + +func newHost() (*host, error) { + h := &host{} + r := &reader{} + r.architecture(h) + r.bootTime(h) + r.hostname(h) + r.network(h) + r.kernelVersion(h) + r.os(h) + r.time(h) + r.uniqueID(h) + return h, r.Err() +} + +type reader struct { + errs []error +} + +func (r *reader) addErr(err error) bool { + if err != nil { + if !errors.Is(err, types.ErrNotImplemented) { + r.errs = append(r.errs, err) + } + return true + } + return false +} + +func (r *reader) Err() error { + if len(r.errs) > 0 { + return &multierror.MultiError{Errors: r.errs} + } + return nil +} + +func (r *reader) architecture(h *host) { + v, err := Architecture() + if r.addErr(err) { + return + } + h.info.Architecture = v +} + +func (r *reader) bootTime(h *host) { + v, err := BootTime() + if r.addErr(err) { + return + } + h.info.BootTime = v +} + +func (r *reader) hostname(h *host) { + v, err := os.Hostname() + if r.addErr(err) { + return + } + h.info.Hostname = strings.ToLower(v) +} + +func (r *reader) network(h *host) { + ips, macs, err := shared.Network() + if r.addErr(err) { + return + } + h.info.IPs = ips + h.info.MACs = macs +} + +func (r *reader) kernelVersion(h *host) { + v, err := 
KernelVersion() + if r.addErr(err) { + return + } + h.info.KernelVersion = v +} + +func (r *reader) os(h *host) { + v, err := OperatingSystem() + if r.addErr(err) { + return + } + h.info.OS = v +} + +func (*reader) time(h *host) { + h.info.Timezone, h.info.TimezoneOffsetSec = time.Now().Zone() +} + +func (r *reader) uniqueID(h *host) { + v, err := MachineID() + if r.addErr(err) { + return + } + h.info.UniqueID = v +} diff --git a/agent/pkg/go-sysinfo/providers/aix/kernel_aix_ppc64.go b/agent/pkg/go-sysinfo/providers/aix/kernel_aix_ppc64.go new file mode 100644 index 0000000..dc3af83 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/aix/kernel_aix_ppc64.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build aix && ppc64 && cgo + +package aix + +/* +#include +*/ +import "C" + +import ( + "fmt" + "strconv" +) + +var oslevel string + +func getKernelVersion() (int, int, error) { + name := C.struct_utsname{} + if _, err := C.uname(&name); err != nil { + return 0, 0, fmt.Errorf("kernel version: uname: %w", err) + } + + version, err := strconv.Atoi(C.GoString(&name.version[0])) + if err != nil { + return 0, 0, fmt.Errorf("parsing kernel version: %w", err) + } + + release, err := strconv.Atoi(C.GoString(&name.release[0])) + if err != nil { + return 0, 0, fmt.Errorf("parsing kernel release: %w", err) + } + return version, release, nil +} + +// KernelVersion returns the version of AIX kernel +func KernelVersion() (string, error) { + major, minor, err := getKernelVersion() + if err != nil { + return "", err + } + return strconv.Itoa(major) + "." + strconv.Itoa(minor), nil +} diff --git a/agent/pkg/go-sysinfo/providers/aix/machineid_aix_ppc64.go b/agent/pkg/go-sysinfo/providers/aix/machineid_aix_ppc64.go new file mode 100644 index 0000000..945ce34 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/aix/machineid_aix_ppc64.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build aix && ppc64 && cgo + +package aix + +/* +#include +*/ +import "C" + +import "fmt" + +// MachineID returns the id of the machine +func MachineID() (string, error) { + name := C.struct_utsname{} + if _, err := C.uname(&name); err != nil { + return "", fmt.Errorf("machine id: %w", err) + } + return C.GoString(&name.machine[0]), nil +} diff --git a/agent/pkg/go-sysinfo/providers/aix/os_aix_ppc64.go b/agent/pkg/go-sysinfo/providers/aix/os_aix_ppc64.go new file mode 100644 index 0000000..27eceef --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/aix/os_aix_ppc64.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build aix && ppc64 && cgo + +package aix + +import ( + "fmt" + "io/ioutil" + "strconv" + "strings" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +// OperatingSystem returns information of the host operating system +func OperatingSystem() (*types.OSInfo, error) { + return getOSInfo() +} + +func getOSInfo() (*types.OSInfo, error) { + major, minor, err := getKernelVersion() + if err != nil { + return nil, err + } + + // Retrieve build version from "/proc/version". + procVersion, err := ioutil.ReadFile("/proc/version") + if err != nil { + return nil, fmt.Errorf("failed to get OS info: cannot open /proc/version: %w", err) + } + build := strings.SplitN(string(procVersion), "\n", 4)[2] + + return &types.OSInfo{ + Type: "unix", + Family: "aix", + Platform: "aix", + Name: "aix", + Version: strconv.Itoa(major) + "." + strconv.Itoa(minor), + Major: major, + Minor: minor, + Patch: 0, // No patch version + Build: build, + }, nil +} diff --git a/agent/pkg/go-sysinfo/providers/aix/process_aix_ppc64.go b/agent/pkg/go-sysinfo/providers/aix/process_aix_ppc64.go new file mode 100644 index 0000000..9704dd4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/aix/process_aix_ppc64.go @@ -0,0 +1,301 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build aix && ppc64 && cgo + +package aix + +/* +#cgo LDFLAGS: -L/usr/lib -lperfstat + +#include +#include +#include + +*/ +import "C" + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + "unsafe" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +// Processes returns a list of all actives processes. +func (aixSystem) Processes() ([]types.Process, error) { + // Retrieve processes using /proc instead of calling + // getprocs which will also retrieve kernel threads. + files, err := ioutil.ReadDir("/proc") + if err != nil { + return nil, fmt.Errorf("error while reading /proc: %w", err) + } + + processes := make([]types.Process, 0, len(files)) + for _, f := range files { + // Check that the file is a correct process directory. + // /proc also contains special files (/proc/version) and threads + // directories (/proc/pid directory but without any "as" file) + if _, err := os.Stat("/proc/" + f.Name() + "/as"); err == nil { + pid, _ := strconv.Atoi(f.Name()) + processes = append(processes, &process{pid: pid}) + } + } + + return processes, nil +} + +// Process returns the process designed by PID. +func (aixSystem) Process(pid int) (types.Process, error) { + p := process{pid: pid} + return &p, nil +} + +// Self returns the current process. +func (s aixSystem) Self() (types.Process, error) { + return s.Process(os.Getpid()) +} + +type process struct { + pid int + info *types.ProcessInfo + env map[string]string +} + +// PID returns the PID of a process. +func (p *process) PID() int { + return p.pid +} + +// Parent returns the parent of a process. +func (p *process) Parent() (types.Process, error) { + info, err := p.Info() + if err != nil { + return nil, err + } + return &process{pid: info.PPID}, nil +} + +// Info returns all information about the process. +func (p *process) Info() (types.ProcessInfo, error) { + if p.info != nil { + return *p.info, nil + } + + p.info = &types.ProcessInfo{ + PID: p.pid, + } + + // Retrieve PPID and StartTime + info := C.struct_procsinfo64{} + cpid := C.pid_t(p.pid) + + num, err := C.getprocs(unsafe.Pointer(&info), C.sizeof_struct_procsinfo64, nil, 0, &cpid, 1) + if num != 1 { + err = syscall.ESRCH + } + if err != nil { + return types.ProcessInfo{}, fmt.Errorf("error while calling getprocs: %w", err) + } + + p.info.PPID = int(info.pi_ppid) + // pi_start is the time in second since the process have started. + p.info.StartTime = time.Unix(0, int64(uint64(info.pi_start)*1000*uint64(time.Millisecond))) + + // Retrieve arguments and executable name + // If buffer is not large enough, args are truncated + buf := make([]byte, 8192) + var args []string + if _, err := C.getargs(unsafe.Pointer(&info), C.sizeof_struct_procsinfo64, (*C.char)(&buf[0]), 8192); err != nil { + return types.ProcessInfo{}, fmt.Errorf("error while calling getargs: %w", err) + } + + bbuf := bytes.NewBuffer(buf) + for { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF || arg[0] == 0 { + break + } + if err != nil { + return types.ProcessInfo{}, fmt.Errorf("error while reading arguments: %w", err) + } + + args = append(args, string(chop(arg))) + } + + // For some special programs, getargs might return an empty buffer. + if len(args) == 0 { + args = append(args, "") + } + + // The first element of the arguments list is the executable path. + // There are some exceptions which don't have an executable path + // but rather a special name directly in args[0]. 
+ if strings.Contains(args[0], "sshd: ") { + // ssh connections can be named "sshd: root@pts/11". + // If we are using filepath.Base, the result will only + // be 11 because of the last "/". + p.info.Name = args[0] + } else { + p.info.Name = filepath.Base(args[0]) + } + + // The process was launched using its absolute path, so we can retrieve + // the executable path from its "name". + if filepath.IsAbs(args[0]) { + p.info.Exe = filepath.Clean(args[0]) + } else { + // TODO: improve this case. The executable full path can still + // be retrieve in some cases. Look at os/executable_path.go + // in the stdlib. + // For the moment, let's "exe" be the same as "name" + p.info.Exe = p.info.Name + } + p.info.Args = args + + // Get CWD + cwd, err := os.Readlink("/proc/" + strconv.Itoa(p.pid) + "/cwd") + if err != nil { + if !os.IsNotExist(err) { + return types.ProcessInfo{}, fmt.Errorf("error while reading /proc/%s/cwd: %w", strconv.Itoa(p.pid), err) + } + } + + p.info.CWD = strings.TrimSuffix(cwd, "/") + + return *p.info, nil +} + +// Environment returns the environment of a process. +func (p *process) Environment() (map[string]string, error) { + if p.env != nil { + return p.env, nil + } + p.env = map[string]string{} + + /* If buffer is not large enough, args are truncated */ + buf := make([]byte, 8192) + info := C.struct_procsinfo64{} + info.pi_pid = C.pid_t(p.pid) + + if _, err := C.getevars(unsafe.Pointer(&info), C.sizeof_struct_procsinfo64, (*C.char)(&buf[0]), 8192); err != nil { + return nil, fmt.Errorf("error while calling getevars: %w", err) + } + + bbuf := bytes.NewBuffer(buf) + + delim := []byte{61} // "=" + + for { + line, err := bbuf.ReadBytes(0) + if err == io.EOF || line[0] == 0 { + break + } + if err != nil { + return nil, fmt.Errorf("error while calling getevars: %w", err) + } + + pair := bytes.SplitN(chop(line), delim, 2) + if len(pair) != 2 { + return nil, errors.New("error reading process environment") + } + p.env[string(pair[0])] = string(pair[1]) + } + + return p.env, nil +} + +// User returns the user IDs of a process. +func (p *process) User() (types.UserInfo, error) { + var prcred prcred + if err := p.decodeProcfsFile("cred", &prcred); err != nil { + return types.UserInfo{}, err + } + return types.UserInfo{ + UID: strconv.Itoa(int(prcred.Ruid)), + EUID: strconv.Itoa(int(prcred.Euid)), + SUID: strconv.Itoa(int(prcred.Suid)), + GID: strconv.Itoa(int(prcred.Rgid)), + EGID: strconv.Itoa(int(prcred.Egid)), + SGID: strconv.Itoa(int(prcred.Sgid)), + }, nil +} + +// Memory returns the current memory usage of a process. +func (p *process) Memory() (types.MemoryInfo, error) { + var mem types.MemoryInfo + pagesize := uint64(os.Getpagesize()) + + info := C.struct_procsinfo64{} + cpid := C.pid_t(p.pid) + + num, err := C.getprocs(unsafe.Pointer(&info), C.sizeof_struct_procsinfo64, nil, 0, &cpid, 1) + if num != 1 { + err = syscall.ESRCH + } + if err != nil { + return types.MemoryInfo{}, fmt.Errorf("error while calling getprocs: %w", err) + } + + mem.Resident = uint64(info.pi_drss+info.pi_trss) * pagesize + mem.Virtual = uint64(info.pi_dvm) * pagesize + + return mem, nil +} + +// CPUTime returns the current CPU usage of a process. 
+func (p *process) CPUTime() (types.CPUTimes, error) { + var pstatus pstatus + if err := p.decodeProcfsFile("status", &pstatus); err != nil { + return types.CPUTimes{}, err + } + return types.CPUTimes{ + User: time.Duration(pstatus.Utime.Sec*1e9 + int64(pstatus.Utime.Nsec)), + System: time.Duration(pstatus.Stime.Sec*1e9 + int64(pstatus.Stime.Nsec)), + }, nil +} + +func (p *process) decodeProcfsFile(name string, data interface{}) error { + fileName := "/proc/" + strconv.Itoa(p.pid) + "/" + name + + file, err := os.Open(fileName) + if err != nil { + return fmt.Errorf("error while opening %s: %w", fileName, err) + } + defer file.Close() + + if err := binary.Read(file, binary.BigEndian, data); err != nil { + return fmt.Errorf("error while decoding %s: %w", fileName, err) + } + + return nil +} + +func chop(buf []byte) []byte { + return buf[0 : len(buf)-1] +} diff --git a/agent/pkg/go-sysinfo/providers/aix/testdata/utmp b/agent/pkg/go-sysinfo/providers/aix/testdata/utmp new file mode 100644 index 0000000..9c65e24 Binary files /dev/null and b/agent/pkg/go-sysinfo/providers/aix/testdata/utmp differ diff --git a/agent/pkg/go-sysinfo/providers/aix/ztypes_aix_ppc64.go b/agent/pkg/go-sysinfo/providers/aix/ztypes_aix_ppc64.go new file mode 100644 index 0000000..0e369bb --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/aix/ztypes_aix_ppc64.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_aix.go + +//go:build aix && ppc64 + +package aix + +type prcred struct { + Euid uint64 + Ruid uint64 + Suid uint64 + Egid uint64 + Rgid uint64 + Sgid uint64 + X_pad [8]uint64 + X_pad1 uint32 + Ngroups uint32 + Groups [1]uint64 +} + +type pstatus struct { + Flag uint32 + Flag2 uint32 + Flags uint32 + Nlwp uint32 + Stat uint8 + Dmodel uint8 + X_pad1 [6]uint8 + Sigpend prSigset + Brkbase uint64 + Brksize uint64 + Stkbase uint64 + Stksize uint64 + Pid uint64 + Ppid uint64 + Pgid uint64 + Sid uint64 + Utime prTimestruc64 + Stime prTimestruc64 + Cutime prTimestruc64 + Cstime prTimestruc64 + Sigtrace prSigset + Flttrace fltset + Sysentry_offset uint32 + Sysexit_offset uint32 + X_pad [8]uint64 + Lwp lwpstatus +} + +type prTimestruc64 struct { + Sec int64 + Nsec int32 + X__pad uint32 +} + +type prSigset struct { + Set [4]uint64 +} + +type fltset struct { + Set [4]uint64 +} + +type lwpstatus struct { + Lwpid uint64 + Flags uint32 + X_pad1 [1]uint8 + State uint8 + Cursig uint16 + Why uint16 + What uint16 + Policy uint32 + Clname [8]uint8 + Lwppend prSigset + Lwphold prSigset + Info prSiginfo64 + Altstack prStack64 + Action prSigaction64 + X_pad2 uint32 + Syscall uint16 + Nsysarg uint16 + Sysarg [8]uint64 + Errno int32 + Ptid uint32 + X_pad [9]uint64 + Reg prgregset + Fpreg prfpregset + Family pfamily +} + +type prSiginfo64 struct { + Signo int32 + Errno int32 + Code int32 + Imm int32 + Status int32 + X__pad1 uint32 + Uid uint64 + Pid uint64 + Addr uint64 + Band int64 + Value [8]byte + X__pad [4]uint32 +} + +type prStack64 struct { + Sp uint64 + Size uint64 + Flags int32 + X__pad [5]int32 +} + +type prSigaction64 struct { + Union [8]byte + Mask prSigset + Flags int32 + X__pad [5]int32 +} + +type prgregset struct { + X__iar uint64 + X__msr uint64 + X__cr uint64 + X__lr uint64 + X__ctr uint64 + X__xer uint64 + X__fpscr uint64 + X__fpscrx uint64 + X__gpr [32]uint64 + X__pad1 [8]uint64 +} + +type prfpregset struct { + X__fpr [32]float64 +} + +type pfamily struct { + Extoff uint64 + Extsize uint64 + Pad [14]uint64 +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/arch_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/arch_darwin.go new file mode 100644 index 0000000..8b3ed91 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/arch_darwin.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build amd64 || arm64 + +package darwin + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +const hardwareMIB = "hw.machine" + +func Architecture() (string, error) { + arch, err := unix.Sysctl(hardwareMIB) + if err != nil { + return "", fmt.Errorf("failed to get architecture: %w", err) + } + + return arch, nil +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/arch_darwin_test.go b/agent/pkg/go-sysinfo/providers/darwin/arch_darwin_test.go new file mode 100644 index 0000000..01a25a5 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/arch_darwin_test.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package darwin + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestArchitecture(t *testing.T) { + a, err := Architecture() + assert.NoError(t, err) + assert.NotEmpty(t, a) +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/boottime_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/boottime_darwin.go new file mode 100644 index 0000000..1954e2a --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/boottime_darwin.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build amd64 || arm64 + +package darwin + +import ( + "fmt" + "time" + + "golang.org/x/sys/unix" +) + +const kernBoottimeMIB = "kern.boottime" + +func BootTime() (time.Time, error) { + tv, err := unix.SysctlTimeval(kernBoottimeMIB) + if err != nil { + return time.Time{}, fmt.Errorf("failed to get host uptime: %w", err) + } + + bootTime := time.Unix(int64(tv.Sec), int64(tv.Usec)*int64(time.Microsecond)) + return bootTime, nil +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/boottime_darwin_test.go b/agent/pkg/go-sysinfo/providers/darwin/boottime_darwin_test.go new file mode 100644 index 0000000..5ac93a0 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/boottime_darwin_test.go @@ -0,0 +1,32 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package darwin + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestBootTime(t *testing.T) { + bt, err := BootTime() + assert.NoError(t, err) + + t.Logf(bt.Format(time.RFC1123)) +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/defs_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/defs_darwin.go new file mode 100644 index 0000000..899c217 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/defs_darwin.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build ignore + +package darwin + +/* +#include +#include +*/ +import "C" + +type processState uint32 + +const ( + stateSIDL processState = iota + 1 + stateRun + stateSleep + stateStop + stateZombie +) + +const argMax = C.ARG_MAX + +type bsdInfo C.struct_proc_bsdinfo + +type procTaskInfo C.struct_proc_taskinfo + +type procTaskAllInfo C.struct_proc_taskallinfo + +type vinfoStat C.struct_vinfo_stat + +type fsid C.struct_fsid + +type vnodeInfo C.struct_vnode_info + +type vnodeInfoPath C.struct_vnode_info_path + +type procVnodePathInfo C.struct_proc_vnodepathinfo + +type vmStatisticsData C.vm_statistics_data_t + +type vmStatistics64Data C.vm_statistics64_data_t + +type vmSize C.vm_size_t + +const ( + cpuStateUser = C.CPU_STATE_USER + cpuStateSystem = C.CPU_STATE_SYSTEM + cpuStateIdle = C.CPU_STATE_IDLE + cpuStateNice = C.CPU_STATE_NICE +) + +type hostCPULoadInfo C.host_cpu_load_info_data_t diff --git a/agent/pkg/go-sysinfo/providers/darwin/doc.go b/agent/pkg/go-sysinfo/providers/darwin/doc.go new file mode 100644 index 0000000..20e80f0 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package darwin implements the HostProvider and ProcessProvider interfaces +// for providing information about MacOS. +package darwin diff --git a/agent/pkg/go-sysinfo/providers/darwin/host_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/host_darwin.go new file mode 100644 index 0000000..41275cc --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/host_darwin.go @@ -0,0 +1,255 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build amd64 || arm64 + +package darwin + +import ( + "errors" + "fmt" + "os" + "strings" + "time" + + "github.com/joeshaw/multierror" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/internal/registry" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/providers/shared" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +func init() { + registry.Register(darwinSystem{}) +} + +type darwinSystem struct{} + +func (s darwinSystem) Host() (types.Host, error) { + return newHost() +} + +type host struct { + info types.HostInfo +} + +func (h *host) Info() types.HostInfo { + return h.info +} + +func (h *host) CPUTime() (types.CPUTimes, error) { + cpu, err := getHostCPULoadInfo() + if err != nil { + return types.CPUTimes{}, fmt.Errorf("failed to get host CPU usage: %w", err) + } + + ticksPerSecond := time.Duration(getClockTicks()) + + return types.CPUTimes{ + User: time.Duration(cpu.User) * time.Second / ticksPerSecond, + System: time.Duration(cpu.System) * time.Second / ticksPerSecond, + Idle: time.Duration(cpu.Idle) * time.Second / ticksPerSecond, + Nice: time.Duration(cpu.Nice) * time.Second / ticksPerSecond, + }, nil +} + +func (h *host) Memory() (*types.HostMemoryInfo, error) { + var mem types.HostMemoryInfo + + // Total physical memory. + total, err := MemTotal() + if err != nil { + return nil, fmt.Errorf("failed to get total physical memory: %w", err) + } + + mem.Total = total + + // Page size for computing byte totals. 
+ pageSizeBytes, err := getPageSize() + if err != nil { + return nil, fmt.Errorf("failed to get page size: %w", err) + } + + // Swap + swap, err := getSwapUsage() + if err != nil { + return nil, fmt.Errorf("failed to get swap usage: %w", err) + } + + mem.VirtualTotal = swap.Total + mem.VirtualUsed = swap.Used + mem.VirtualFree = swap.Available + + // Virtual Memory Statistics + vmStat, err := getHostVMInfo64() + if errors.Is(err, types.ErrNotImplemented) { + return &mem, nil + } + + if err != nil { + return nil, fmt.Errorf("failed to get virtual memory statistics: %w", err) + } + + inactiveBytes := uint64(vmStat.Inactive_count) * pageSizeBytes + purgeableBytes := uint64(vmStat.Purgeable_count) * pageSizeBytes + mem.Metrics = map[string]uint64{ + "active_bytes": uint64(vmStat.Active_count) * pageSizeBytes, + "compressed_bytes": uint64(vmStat.Compressor_page_count) * pageSizeBytes, + "compressions_bytes": uint64(vmStat.Compressions) * pageSizeBytes, // Cumulative compressions. + "copy_on_write_faults": vmStat.Cow_faults, + "decompressions_bytes": uint64(vmStat.Decompressions) * pageSizeBytes, // Cumulative decompressions. + "external_bytes": uint64(vmStat.External_page_count) * pageSizeBytes, // File Cache / File-backed pages + "inactive_bytes": inactiveBytes, + "internal_bytes": uint64(vmStat.Internal_page_count) * pageSizeBytes, // App Memory / Anonymous + "page_ins_bytes": uint64(vmStat.Pageins) * pageSizeBytes, + "page_outs_bytes": uint64(vmStat.Pageouts) * pageSizeBytes, + "purgeable_bytes": purgeableBytes, + "purged_bytes": uint64(vmStat.Purges) * pageSizeBytes, + "reactivated_bytes": uint64(vmStat.Reactivations) * pageSizeBytes, + "speculative_bytes": uint64(vmStat.Speculative_count) * pageSizeBytes, + "swap_ins_bytes": uint64(vmStat.Swapins) * pageSizeBytes, + "swap_outs_bytes": uint64(vmStat.Swapouts) * pageSizeBytes, + "throttled_bytes": uint64(vmStat.Throttled_count) * pageSizeBytes, + "translation_faults": vmStat.Faults, + "uncompressed_bytes": uint64(vmStat.Total_uncompressed_pages_in_compressor) * pageSizeBytes, + "wired_bytes": uint64(vmStat.Wire_count) * pageSizeBytes, + "zero_filled_bytes": uint64(vmStat.Zero_fill_count) * pageSizeBytes, + } + + // From Activity Monitor: Memory Used = App Memory (internal) + Wired + Compressed + // https://support.apple.com/en-us/HT201538 + mem.Used = uint64(vmStat.Internal_page_count+vmStat.Wire_count+vmStat.Compressor_page_count) * pageSizeBytes + mem.Free = uint64(vmStat.Free_count) * pageSizeBytes + mem.Available = mem.Free + inactiveBytes + purgeableBytes + + return &mem, nil +} + +func (h *host) FQDN() (string, error) { + return shared.FQDN() +} + +func (h *host) LoadAverage() (*types.LoadAverageInfo, error) { + load, err := getLoadAverage() + if err != nil { + return nil, fmt.Errorf("failed to get loadavg: %w", err) + } + + scale := float64(load.scale) + + return &types.LoadAverageInfo{ + One: float64(load.load[0]) / scale, + Five: float64(load.load[1]) / scale, + Fifteen: float64(load.load[2]) / scale, + }, nil +} + +func newHost() (*host, error) { + h := &host{} + r := &reader{} + r.architecture(h) + r.bootTime(h) + r.hostname(h) + r.network(h) + r.kernelVersion(h) + r.os(h) + r.time(h) + r.uniqueID(h) + return h, r.Err() +} + +type reader struct { + errs []error +} + +func (r *reader) addErr(err error) bool { + if err != nil { + if !errors.Is(err, types.ErrNotImplemented) { + r.errs = append(r.errs, err) + } + return true + } + return false +} + +func (r *reader) Err() error { + if len(r.errs) > 0 { + return 
&multierror.MultiError{Errors: r.errs} + } + return nil +} + +func (r *reader) architecture(h *host) { + v, err := Architecture() + if r.addErr(err) { + return + } + h.info.Architecture = v +} + +func (r *reader) bootTime(h *host) { + v, err := BootTime() + if r.addErr(err) { + return + } + h.info.BootTime = v +} + +func (r *reader) hostname(h *host) { + v, err := os.Hostname() + if r.addErr(err) { + return + } + h.info.Hostname = strings.ToLower(v) +} + +func (r *reader) network(h *host) { + ips, macs, err := shared.Network() + if r.addErr(err) { + return + } + h.info.IPs = ips + h.info.MACs = macs +} + +func (r *reader) kernelVersion(h *host) { + v, err := KernelVersion() + if r.addErr(err) { + return + } + h.info.KernelVersion = v +} + +func (r *reader) os(h *host) { + v, err := OperatingSystem() + if r.addErr(err) { + return + } + h.info.OS = v +} + +func (r *reader) time(h *host) { + h.info.Timezone, h.info.TimezoneOffsetSec = time.Now().Zone() +} + +func (r *reader) uniqueID(h *host) { + v, err := MachineID() + if r.addErr(err) { + return + } + h.info.UniqueID = v +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/host_darwin_test.go b/agent/pkg/go-sysinfo/providers/darwin/host_darwin_test.go new file mode 100644 index 0000000..8330d51 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/host_darwin_test.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build amd64 || arm64 + +package darwin + +import ( + "encoding/json" + "testing" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/internal/registry" +) + +var _ registry.HostProvider = darwinSystem{} + +func TestHost(t *testing.T) { + host, err := darwinSystem{}.Host() + if err != nil { + t.Logf("could not get all host info: %v\n", err) + } + + info := host.Info() + data, _ := json.MarshalIndent(info, "", " ") + t.Logf(string(data)) +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/kernel_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/kernel_darwin.go new file mode 100644 index 0000000..7246257 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/kernel_darwin.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build !386 + +package darwin + +import ( + "fmt" + "syscall" +) + +const kernelReleaseMIB = "kern.osrelease" + +func KernelVersion() (string, error) { + version, err := syscall.Sysctl(kernelReleaseMIB) + if err != nil { + return "", fmt.Errorf("failed to get kernel version: %w", err) + } + + return version, nil +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/load_average_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/load_average_darwin.go new file mode 100644 index 0000000..34f3a34 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/load_average_darwin.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build amd64 || arm64 + +package darwin + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +const loadAverage = "vm.loadavg" + +type loadAvg struct { + load [3]uint32 + scale int +} + +func getLoadAverage() (*loadAvg, error) { + data, err := unix.SysctlRaw(loadAverage) + if err != nil { + return nil, err + } + + load := *(*loadAvg)(unsafe.Pointer((&data[0]))) + + return &load, nil +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/load_average_darwin_test.go b/agent/pkg/go-sysinfo/providers/darwin/load_average_darwin_test.go new file mode 100644 index 0000000..cad8ffa --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/load_average_darwin_test.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
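getLoadAverage above returns the raw vm.loadavg sysctl values, which are fixed-point integers plus a scale factor; the LoadAverage method divides each reading by that scale to produce the familiar floating-point averages. A small self-contained sketch of the conversion with made-up sample values (not part of the vendored code):

package main

import "fmt"

func main() {
	// Hypothetical raw values: ldavg[3] fixed-point readings and the fscale divisor.
	raw := [3]uint32{3277, 2458, 2048}
	scale := 2048.0

	fmt.Printf("load averages: %.2f %.2f %.2f\n",
		float64(raw[0])/scale, // ~1.60
		float64(raw[1])/scale, // ~1.20
		float64(raw[2])/scale) // 1.00
}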
+
+package darwin
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGetLoadAverage(t *testing.T) {
+	a, err := getLoadAverage()
+	assert.NoError(t, err)
+	assert.NotEmpty(t, a)
+}
diff --git a/agent/pkg/go-sysinfo/providers/darwin/machineid_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/machineid_darwin.go
new file mode 100644
index 0000000..4339366
--- /dev/null
+++ b/agent/pkg/go-sysinfo/providers/darwin/machineid_darwin.go
@@ -0,0 +1,60 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build (amd64 && cgo) || (arm64 && cgo)
+
+package darwin
+
+// #include <unistd.h>
+// #include <uuid/uuid.h>
+import "C"
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+// MachineID returns the Hardware UUID also accessible via
+// About this Mac -> System Report and as the field
+// IOPlatformUUID in the output of "ioreg -d2 -c IOPlatformExpertDevice".
+func MachineID() (string, error) {
+	return getHostUUID()
+}
+
+func getHostUUID() (string, error) {
+	var uuidC C.uuid_t
+	var id [unsafe.Sizeof(uuidC)]C.uchar
+	wait := C.struct_timespec{5, 0} // 5 seconds
+
+	ret, err := C.gethostuuid(&id[0], &wait)
+	if ret != 0 {
+		if err != nil {
+			return "", fmt.Errorf("gethostuuid failed with %v: %w", ret, err)
+		}
+
+		return "", fmt.Errorf("gethostuuid failed with %v", ret)
+	}
+
+	var uuidStringC C.uuid_string_t
+	var uuid [unsafe.Sizeof(uuidStringC)]C.char
+	_, err = C.uuid_unparse_upper(&id[0], &uuid[0])
+	if err != nil {
+		return "", fmt.Errorf("uuid_unparse_upper failed: %w", err)
+	}
+
+	return C.GoString(&uuid[0]), nil
+}
diff --git a/agent/pkg/go-sysinfo/providers/darwin/machineid_nocgo_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/machineid_nocgo_darwin.go
new file mode 100644
index 0000000..cd21179
--- /dev/null
+++ b/agent/pkg/go-sysinfo/providers/darwin/machineid_nocgo_darwin.go
@@ -0,0 +1,30 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
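MachineID above requires cgo; the non-cgo variant that follows returns types.ErrNotImplemented instead, so callers in this repository would typically treat the hardware UUID as optional. An illustrative, darwin-only usage sketch (not part of the vendored code):

//go:build darwin

package main

import (
	"errors"
	"fmt"

	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/providers/darwin"
	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types"
)

func main() {
	id, err := darwin.MachineID()
	switch {
	case errors.Is(err, types.ErrNotImplemented):
		fmt.Println("hardware UUID unavailable (built without cgo)")
	case err != nil:
		fmt.Println("error reading hardware UUID:", err)
	default:
		fmt.Println("hardware UUID:", id)
	}
}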
+ +//go:build (amd64 && !cgo) || (arm64 && !cgo) + +package darwin + +import ( + "fmt" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +func MachineID() (string, error) { + return "", fmt.Errorf("machineid requires cgo: %w", types.ErrNotImplemented) +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/memory_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/memory_darwin.go new file mode 100644 index 0000000..73dd7cf --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/memory_darwin.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build amd64 || arm64 + +package darwin + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +const hwMemsizeMIB = "hw.memsize" + +func MemTotal() (uint64, error) { + size, err := unix.SysctlUint64(hwMemsizeMIB) + if err != nil { + return 0, fmt.Errorf("failed to get mem total: %w", err) + } + + return size, nil +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/memory_darwin_test.go b/agent/pkg/go-sysinfo/providers/darwin/memory_darwin_test.go new file mode 100644 index 0000000..bcb1495 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/memory_darwin_test.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package darwin + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMemory(t *testing.T) { + m, err := MemTotal() + assert.NoError(t, err) + assert.NotZero(t, m) +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/os.go b/agent/pkg/go-sysinfo/providers/darwin/os.go new file mode 100644 index 0000000..f959af9 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/os.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
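MemTotal above is a thin wrapper around the hw.memsize sysctl. For reference, the same value can be read directly through golang.org/x/sys/unix; a minimal darwin-only sketch (not part of the vendored code):

//go:build darwin

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// hw.memsize reports total physical memory in bytes on macOS.
	total, err := unix.SysctlUint64("hw.memsize")
	if err != nil {
		panic(err)
	}
	fmt.Printf("physical memory: %.1f GiB\n", float64(total)/(1<<30))
}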
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package darwin
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"strings"
+
+	"howett.net/plist"
+
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types"
+)
+
+const (
+	systemVersionPlist = "/System/Library/CoreServices/SystemVersion.plist"
+
+	plistProductName         = "ProductName"
+	plistProductVersion      = "ProductVersion"
+	plistProductBuildVersion = "ProductBuildVersion"
+)
+
+func OperatingSystem() (*types.OSInfo, error) {
+	data, err := ioutil.ReadFile(systemVersionPlist)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read plist file: %w", err)
+	}
+
+	return getOSInfo(data)
+}
+
+func getOSInfo(data []byte) (*types.OSInfo, error) {
+	attrs := map[string]string{}
+	if _, err := plist.Unmarshal(data, &attrs); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal plist data: %w", err)
+	}
+
+	productName, found := attrs[plistProductName]
+	if !found {
+		return nil, fmt.Errorf("plist key %v not found", plistProductName)
+	}
+
+	version, found := attrs[plistProductVersion]
+	if !found {
+		return nil, fmt.Errorf("plist key %v not found", plistProductVersion)
+	}
+
+	build, found := attrs[plistProductBuildVersion]
+	if !found {
+		return nil, fmt.Errorf("plist key %v not found", plistProductBuildVersion)
+	}
+
+	var major, minor, patch int
+	for i, v := range strings.SplitN(version, ".", 3) {
+		switch i {
+		case 0:
+			major, _ = strconv.Atoi(v)
+		case 1:
+			minor, _ = strconv.Atoi(v)
+		case 2:
+			patch, _ = strconv.Atoi(v)
+		default:
+			break
+		}
+	}
+
+	return &types.OSInfo{
+		Type:     "macos",
+		Family:   "darwin",
+		Platform: "darwin",
+		Name:     productName,
+		Version:  version,
+		Major:    major,
+		Minor:    minor,
+		Patch:    patch,
+		Build:    build,
+	}, nil
+}
diff --git a/agent/pkg/go-sysinfo/providers/darwin/os_test.go b/agent/pkg/go-sysinfo/providers/darwin/os_test.go
new file mode 100644
index 0000000..027585b
--- /dev/null
+++ b/agent/pkg/go-sysinfo/providers/darwin/os_test.go
@@ -0,0 +1,59 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package darwin
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+const SystemVersionPlist = `<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>ProductBuildVersion</key>
+	<string>16G1114</string>
+	<key>ProductCopyright</key>
+	<string>1983-2017 Apple Inc.</string>
+	<key>ProductName</key>
+	<string>Mac OS X</string>
+	<key>ProductUserVisibleVersion</key>
+	<string>10.12.6</string>
+	<key>ProductVersion</key>
+	<string>10.12.6</string>
+</dict>
+</plist>
+`
+
+func TestOperatingSystem(t *testing.T) {
+	osInfo, err := getOSInfo([]byte(SystemVersionPlist))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assert.Equal(t, "macos", osInfo.Type)
+	assert.Equal(t, "darwin", osInfo.Family)
+	assert.Equal(t, "darwin", osInfo.Platform)
+	assert.Equal(t, "Mac OS X", osInfo.Name)
+	assert.Equal(t, "10.12.6", osInfo.Version)
+	assert.Equal(t, 10, osInfo.Major)
+	assert.Equal(t, 12, osInfo.Minor)
+	assert.Equal(t, 6, osInfo.Patch)
+	assert.Equal(t, "16G1114", osInfo.Build)
+}
diff --git a/agent/pkg/go-sysinfo/providers/darwin/process_cgo_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/process_cgo_darwin.go
new file mode 100644
index 0000000..094f950
--- /dev/null
+++ b/agent/pkg/go-sysinfo/providers/darwin/process_cgo_darwin.go
@@ -0,0 +1,58 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build (amd64 && cgo) || (arm64 && cgo)
+
+package darwin
+
+// #cgo LDFLAGS:-lproc
+// #include <libproc.h>
+// #include <sys/sysctl.h>
+import "C"
+
+import (
+	"errors"
+	"unsafe"
+)
+
+//go:generate sh -c "go tool cgo -godefs defs_darwin.go > ztypes_darwin.go"
+
+func getProcTaskAllInfo(pid int, info *procTaskAllInfo) error {
+	size := C.int(unsafe.Sizeof(*info))
+	ptr := unsafe.Pointer(info)
+
+	n, err := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size)
+	if err != nil {
+		return err
+	} else if n != size {
+		return errors.New("failed to read process info with proc_pidinfo")
+	}
+
+	return nil
+}
+
+func getProcVnodePathInfo(pid int, info *procVnodePathInfo) error {
+	size := C.int(unsafe.Sizeof(*info))
+	ptr := unsafe.Pointer(info)
+
+	n := C.proc_pidinfo(C.int(pid), C.PROC_PIDVNODEPATHINFO, 0, ptr, size)
+	if n != size {
+		return errors.New("failed to read vnode info with proc_pidinfo")
+	}
+
+	return nil
+}
diff --git a/agent/pkg/go-sysinfo/providers/darwin/process_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/process_darwin.go
new file mode 100644
index 0000000..a9f98a0
--- /dev/null
+++ b/agent/pkg/go-sysinfo/providers/darwin/process_darwin.go
@@ -0,0 +1,254 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build amd64 || arm64 + +package darwin + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "os" + "strconv" + "strings" + "syscall" + "time" + + "golang.org/x/sys/unix" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +var errInvalidProcargs2Data = errors.New("invalid kern.procargs2 data") + +func (s darwinSystem) Processes() ([]types.Process, error) { + ps, err := unix.SysctlKinfoProcSlice("kern.proc.all") + if err != nil { + return nil, fmt.Errorf("failed to read process table: %w", err) + } + + processes := make([]types.Process, 0, len(ps)) + for _, kp := range ps { + pid := kp.Proc.P_pid + if pid == 0 { + continue + } + + processes = append(processes, &process{ + pid: int(pid), + }) + } + + return processes, nil +} + +func (s darwinSystem) Process(pid int) (types.Process, error) { + p := process{pid: pid} + + return &p, nil +} + +func (s darwinSystem) Self() (types.Process, error) { + return s.Process(os.Getpid()) +} + +type process struct { + info *types.ProcessInfo + pid int + cwd string + exe string + args []string + env map[string]string +} + +func (p *process) PID() int { + return p.pid +} + +func (p *process) Parent() (types.Process, error) { + info, err := p.Info() + if err != nil { + return nil, err + } + + return &process{pid: info.PPID}, nil +} + +func (p *process) Info() (types.ProcessInfo, error) { + if p.info != nil { + return *p.info, nil + } + + var task procTaskAllInfo + if err := getProcTaskAllInfo(p.pid, &task); err != nil && err != types.ErrNotImplemented { + return types.ProcessInfo{}, err + } + + var vnode procVnodePathInfo + if err := getProcVnodePathInfo(p.pid, &vnode); err != nil && err != types.ErrNotImplemented { + return types.ProcessInfo{}, err + } + + if err := kern_procargs(p.pid, p); err != nil { + return types.ProcessInfo{}, err + } + + p.info = &types.ProcessInfo{ + Name: int8SliceToString(task.Pbsd.Pbi_name[:]), + PID: p.pid, + PPID: int(task.Pbsd.Pbi_ppid), + CWD: int8SliceToString(vnode.Cdir.Path[:]), + Exe: p.exe, + Args: p.args, + StartTime: time.Unix(int64(task.Pbsd.Pbi_start_tvsec), + int64(task.Pbsd.Pbi_start_tvusec)*int64(time.Microsecond)), + } + + return *p.info, nil +} + +func (p *process) User() (types.UserInfo, error) { + kproc, err := unix.SysctlKinfoProc("kern.proc.pid", p.pid) + if err != nil { + return types.UserInfo{}, err + } + + egid := "" + if len(kproc.Eproc.Ucred.Groups) > 0 { + egid = strconv.Itoa(int(kproc.Eproc.Ucred.Groups[0])) + } + + return types.UserInfo{ + UID: strconv.Itoa(int(kproc.Eproc.Pcred.P_ruid)), + EUID: strconv.Itoa(int(kproc.Eproc.Ucred.Uid)), + SUID: strconv.Itoa(int(kproc.Eproc.Pcred.P_svuid)), + GID: strconv.Itoa(int(kproc.Eproc.Pcred.P_rgid)), + SGID: strconv.Itoa(int(kproc.Eproc.Pcred.P_svgid)), + EGID: egid, + }, nil +} + +func (p *process) Environment() (map[string]string, error) { + return p.env, nil +} + +func (p *process) CPUTime() (types.CPUTimes, error) { + var task procTaskAllInfo + if err := getProcTaskAllInfo(p.pid, &task); err != nil { + return types.CPUTimes{}, err + } + return types.CPUTimes{ + User: 
time.Duration(task.Ptinfo.Total_user), + System: time.Duration(task.Ptinfo.Total_system), + }, nil +} + +func (p *process) Memory() (types.MemoryInfo, error) { + var task procTaskAllInfo + if err := getProcTaskAllInfo(p.pid, &task); err != nil { + return types.MemoryInfo{}, err + } + return types.MemoryInfo{ + Virtual: task.Ptinfo.Virtual_size, + Resident: task.Ptinfo.Resident_size, + Metrics: map[string]uint64{ + "page_ins": uint64(task.Ptinfo.Pageins), + "page_faults": uint64(task.Ptinfo.Faults), + }, + }, nil +} + +// wrapper around sysctl KERN_PROCARGS2 +// callbacks params are optional, +// up to the caller as to which pieces of data they want +func kern_procargs(pid int, p *process) error { + data, err := unix.SysctlRaw("kern.procargs2", pid) + if err != nil { + if errors.Is(err, syscall.EINVAL) { + // sysctl returns "invalid argument" for both "no such process" + // and "operation not permitted" errors. + return fmt.Errorf("no such process or operation not permitted: %w", err) + } + return err + } + + return parseKernProcargs2(data, p) +} + +func parseKernProcargs2(data []byte, p *process) error { + // argc + if len(data) < 4 { + return errInvalidProcargs2Data + } + argc := binary.LittleEndian.Uint32(data) + data = data[4:] + + // exe + lines := strings.Split(string(data), "\x00") + p.exe = lines[0] + lines = lines[1:] + + // Skip nulls that may be appended after the exe. + for len(lines) > 0 { + if lines[0] != "" { + break + } + lines = lines[1:] + } + + // argv + if c := min(argc, uint32(len(lines))); c > 0 { + p.args = lines[:c] + lines = lines[c:] + } + + // env vars + env := make(map[string]string, len(lines)) + for _, l := range lines { + if len(l) == 0 { + break + } + + key, val, _ := strings.Cut(l, "=") + env[key] = val + } + p.env = env + + return nil +} + +func int8SliceToString(s []int8) string { + buf := bytes.NewBuffer(make([]byte, len(s))) + buf.Reset() + + for _, b := range s { + if b == 0 { + break + } + buf.WriteByte(byte(b)) + } + return buf.String() +} + +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/process_darwin_test.go b/agent/pkg/go-sysinfo/providers/darwin/process_darwin_test.go new file mode 100644 index 0000000..4ed52d5 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/process_darwin_test.go @@ -0,0 +1,240 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
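parseKernProcargs2 above decodes the kern.procargs2 buffer layout: a little-endian uint32 argc, the executable path, optional NUL padding, argc argument strings, then KEY=VALUE environment entries, all NUL-separated. A standalone sketch that builds and splits such a buffer with made-up values (illustrative only, not part of the vendored code):

package main

import (
	"encoding/binary"
	"fmt"
	"strings"
)

func main() {
	// Assemble a miniature kern.procargs2-style buffer with made-up values.
	payload := "/usr/local/bin/demo\x00\x00" + // exe plus one extra NUL of padding
		"demo\x00--verbose\x00" + // argv entries (argc = 2)
		"TZ=UTC\x00FOO=\x00" // environment entries
	data := make([]byte, 4, 4+len(payload))
	binary.LittleEndian.PutUint32(data, 2) // argc
	data = append(data, payload...)

	// Walk the buffer the same way parseKernProcargs2 does.
	argc := binary.LittleEndian.Uint32(data[:4])
	parts := strings.Split(string(data[4:]), "\x00")
	exe := parts[0]
	args := parts[2 : 2+argc] // parts[1] is the empty padding entry after exe
	envs := parts[2+argc : 2+argc+2]
	fmt.Println(exe, args, envs) // /usr/local/bin/demo [demo --verbose] [TZ=UTC FOO=]
}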
+ +//go:build amd64 || arm64 + +package darwin + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "os" + "os/exec" + "syscall" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/internal/registry" +) + +var ( + _ registry.HostProvider = darwinSystem{} + _ registry.ProcessProvider = darwinSystem{} +) + +func TestKernProcInfo(t *testing.T) { + var p process + if err := kern_procargs(os.Getpid(), &p); err != nil { + t.Fatal(err) + } + + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + assert.Equal(t, exe, p.exe) + assert.Equal(t, os.Args, p.args) +} + +const ( + noValueEnvVar = "_GO_SYSINFO_NO_VALUE" + emptyValueEnvVar = "_GO_SYSINFO_EMPTY_VALUE" + fooValueEnvVar = "_GO_SYSINFO_FOO_VALUE" +) + +func TestProcessEnvironment(t *testing.T) { + cmd := exec.Command("go", "test", "-v", "-run", "^TestProcessEnvironmentInternal$") + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, + // Activate the test case. + "GO_SYSINFO_ENV_TESTING=1", + // Set specific values that the test asserts. + noValueEnvVar, + emptyValueEnvVar+"=", + fooValueEnvVar+"=FOO", + ) + + out, err := cmd.CombinedOutput() + require.NoError(t, err, "TestProcessEnvironmentInternal failed:\n"+string(out)) +} + +func TestProcessEnvironmentInternal(t *testing.T) { + // This test case is executes in its own process space with a specific + // environment set by TestProcessEnvironment. + if os.Getenv("GO_SYSINFO_ENV_TESTING") != "1" { + t.Skip() + } + + var p process + if err := kern_procargs(os.Getpid(), &p); err != nil { + t.Fatal(err) + } + + value, exists := p.env[noValueEnvVar] + assert.True(t, exists, "Missing "+noValueEnvVar) + assert.Equal(t, "", value) + + value, exists = p.env[emptyValueEnvVar] + assert.True(t, exists, "Missing "+emptyValueEnvVar) + assert.Equal(t, "", value) + + assert.Equal(t, "FOO", p.env[fooValueEnvVar]) +} + +func TestProcesses(t *testing.T) { + var s darwinSystem + processes, err := s.Processes() + if err != nil { + t.Fatal(err) + } + + var count int + for _, proc := range processes { + processInfo, err := proc.Info() + switch { + // Ignore processes that no longer exist or that cannot be accessed. 
+ case errors.Is(err, syscall.ESRCH), + errors.Is(err, syscall.EPERM), + errors.Is(err, syscall.EINVAL): + continue + case err != nil: + t.Fatalf("failed to get process info for PID=%d: %v", proc.PID(), err) + default: + count++ + } + + if processInfo.PID == 0 { + t.Fatalf("empty pid in %#v", processInfo) + } + + if processInfo.Exe == "" { + t.Fatalf("empty exec in %#v", processInfo) + } + + u, err := proc.User() + require.NoError(t, err) + + require.NotEmpty(t, u.UID) + require.NotEmpty(t, u.EUID) + require.NotEmpty(t, u.SUID) + require.NotEmpty(t, u.GID) + require.NotEmpty(t, u.EGID) + require.NotEmpty(t, u.SGID) + } + + assert.NotZero(t, count, "failed to get process info for any processes") +} + +func TestParseKernProcargs2(t *testing.T) { + testCases := []struct { + data []byte + process process + err error + }{ + {data: nil, err: errInvalidProcargs2Data}, + {data: []byte{}, err: errInvalidProcargs2Data}, + {data: []byte{0xFF, 0xFF, 0xFF, 0xFF}, process: process{env: map[string]string{}}}, + {data: []byte{0, 0, 0, 0}, process: process{env: map[string]string{}}}, + {data: []byte{5, 0, 0, 0}, process: process{env: map[string]string{}}}, + { + data: buildKernProcargs2Data(3, "./example", []string{"/Users/test/example", "--one", "--two"}, []string{"TZ=UTC", "FOO="}), + process: process{ + exe: "./example", + args: []string{"/Users/test/example", "--one", "--two"}, + env: map[string]string{ + "TZ": "UTC", + "FOO": "", + }, + }, + }, + } + + for i, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + var p process + err := parseKernProcargs2(tc.data, &p) + if tc.err != nil { + assert.ErrorIs(t, err, tc.err) + } else { + assert.EqualValues(t, tc.process, p) + } + }) + } +} + +func FuzzParseKernProcargs2(f *testing.F) { + f.Add([]byte(nil)) + f.Add([]byte{0, 0, 0, 0}) + f.Add([]byte{10, 0, 0, 0}) + f.Add([]byte{0xFF, 0xFF, 0xFF, 0xFF}) + f.Add(buildKernProcargs2Data(-1, "./foo", []string{"/Users/john/foo", "-c"}, []string{"TZ=UTC"})) + f.Add(buildKernProcargs2Data(2, "./foo", []string{"/Users/john/foo", "-c"}, []string{"TZ=UTC"})) + f.Add(buildKernProcargs2Data(100, "./foo", []string{"/Users/john/foo", "-c"}, []string{"TZ=UTC"})) + + f.Fuzz(func(t *testing.T, b []byte) { + p := &process{} + _ = parseKernProcargs2(b, p) + }) +} + +// buildKernProcargs2Data builds a response that is similar to what +// sysctl kern.procargs2 returns. +func buildKernProcargs2Data(argc int32, exe string, args, envs []string) []byte { + // argc + data := make([]byte, 4) + binary.LittleEndian.PutUint32(data, uint32(argc)) + buf := bytes.NewBuffer(data) + + // exe with optional extra null padding + buf.WriteString(exe) + buf.WriteByte(0) + buf.WriteByte(0) + + // argv + for _, arg := range args { + buf.WriteString(arg) + buf.WriteByte(0) + } + + // env + for _, env := range envs { + buf.WriteString(env) + buf.WriteByte(0) + } + + // The returned buffer from the real kern.procargs2 contains more data than + // what go-sysinfo parses. This is a rough simulation of that extra data. 
+ buf.Write(bytes.Repeat([]byte{0}, 100)) + buf.WriteString("ptr_munge=") + buf.Write(bytes.Repeat([]byte{0}, 18)) + buf.WriteString("main_stack==") + buf.Write(bytes.Repeat([]byte{0}, 43)) + buf.WriteString("executable_file=0x1a01000010,0x36713a1") + buf.WriteString("dyld_file=0x1a01000010,0xfffffff0008839c") + buf.WriteString("executable_cdhash=5ca6024f9cdaa3a9fe515bfad77e1acf0f6b15b6") + buf.WriteString("executable_boothash=a4a5613c07091ef0a221ee75a924341406eab85e") + buf.WriteString("arm64e_abi=os") + buf.WriteString("th_port=") + buf.Write(bytes.Repeat([]byte{0}, 11)) + + return buf.Bytes() +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/process_nocgo_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/process_nocgo_darwin.go new file mode 100644 index 0000000..c74c636 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/process_nocgo_darwin.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build (amd64 && !cgo) || (arm64 && !cgo) + +package darwin + +import "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" + +func getProcTaskAllInfo(pid int, info *procTaskAllInfo) error { + return types.ErrNotImplemented +} + +func getProcVnodePathInfo(pid int, info *procVnodePathInfo) error { + return types.ErrNotImplemented +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/syscall_cgo_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/syscall_cgo_darwin.go new file mode 100644 index 0000000..ce4ee10 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/syscall_cgo_darwin.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build (amd64 && cgo) || (arm64 && cgo) + +package darwin + +/* +#cgo LDFLAGS:-lproc +#include +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "unsafe" +) + +func getHostCPULoadInfo() (*cpuUsage, error) { + var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT + var cpu cpuUsage + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_CPU_LOAD_INFO, + C.host_info_t(unsafe.Pointer(&cpu)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics returned status %d", status) + } + + return &cpu, nil +} + +// getClockTicks returns the number of click ticks in one jiffie. +func getClockTicks() int { + return int(C.sysconf(C._SC_CLK_TCK)) +} + +func getHostVMInfo64() (*vmStatistics64Data, error) { + var count C.mach_msg_type_number_t = C.HOST_VM_INFO64_COUNT + + var vmStat vmStatistics64Data + status := C.host_statistics64( + C.host_t(C.mach_host_self()), + C.HOST_VM_INFO64, + C.host_info_t(unsafe.Pointer(&vmStat)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics64 returned status %d", status) + } + + return &vmStat, nil +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/syscall_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/syscall_darwin.go new file mode 100644 index 0000000..fe14050 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/syscall_darwin.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build amd64 || arm64 + +package darwin + +import ( + "bytes" + "encoding/binary" + "fmt" + + "golang.org/x/sys/unix" +) + +type cpuUsage struct { + User uint32 + System uint32 + Idle uint32 + Nice uint32 +} + +func getPageSize() (uint64, error) { + i, err := unix.SysctlUint32("vm.pagesize") + if err != nil { + return 0, fmt.Errorf("vm.pagesize returned %w", err) + } + + return uint64(i), nil +} + +// From sysctl.h - xsw_usage. +type swapUsage struct { + Total uint64 + Available uint64 + Used uint64 + PageSize uint64 +} + +const vmSwapUsageMIB = "vm.swapusage" + +func getSwapUsage() (*swapUsage, error) { + var swap swapUsage + data, err := unix.SysctlRaw(vmSwapUsageMIB) + if err != nil { + return nil, err + } + + if err := binary.Read(bytes.NewReader(data), binary.LittleEndian, &swap); err != nil { + return nil, err + } + + return &swap, nil +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/syscall_nocgo_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/syscall_nocgo_darwin.go new file mode 100644 index 0000000..143ccbe --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/syscall_nocgo_darwin.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build (amd64 && !cgo) || (arm64 && !cgo) + +package darwin + +import ( + "fmt" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +func getHostCPULoadInfo() (*cpuUsage, error) { + return nil, fmt.Errorf("host cpu load requires cgo: %w", types.ErrNotImplemented) +} + +// getClockTicks returns the number of click ticks in one jiffie. +func getClockTicks() int { + return 0 +} + +func getHostVMInfo64() (*vmStatistics64Data, error) { + return nil, fmt.Errorf("host vm info requires cgo: %w", types.ErrNotImplemented) +} diff --git a/agent/pkg/go-sysinfo/providers/darwin/ztypes_darwin.go b/agent/pkg/go-sysinfo/providers/darwin/ztypes_darwin.go new file mode 100644 index 0000000..4ad6779 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/darwin/ztypes_darwin.go @@ -0,0 +1,187 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
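getSwapUsage above reads the vm.swapusage sysctl and decodes it into the swapUsage struct with binary.Read. A self-contained sketch of the same decoding, mirroring that struct layout (illustrative, darwin-only, not part of the vendored code):

//go:build darwin

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"golang.org/x/sys/unix"
)

// xswUsage mirrors the swapUsage struct defined above (xsw_usage from sysctl.h).
type xswUsage struct {
	Total, Available, Used, PageSize uint64
}

func main() {
	raw, err := unix.SysctlRaw("vm.swapusage")
	if err != nil {
		panic(err)
	}

	var swap xswUsage
	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &swap); err != nil {
		panic(err)
	}
	fmt.Printf("swap: total=%d used=%d free=%d\n", swap.Total, swap.Used, swap.Available)
}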
+ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package darwin + +type processState uint32 + +const ( + stateSIDL processState = iota + 1 + stateRun + stateSleep + stateStop + stateZombie +) + +const argMax = 0x40000 + +type bsdInfo struct { + Pbi_flags uint32 + Pbi_status uint32 + Pbi_xstatus uint32 + Pbi_pid uint32 + Pbi_ppid uint32 + Pbi_uid uint32 + Pbi_gid uint32 + Pbi_ruid uint32 + Pbi_rgid uint32 + Pbi_svuid uint32 + Pbi_svgid uint32 + Rfu_1 uint32 + Pbi_comm [16]int8 + Pbi_name [32]int8 + Pbi_nfiles uint32 + Pbi_pgid uint32 + Pbi_pjobc uint32 + E_tdev uint32 + E_tpgid uint32 + Pbi_nice int32 + Pbi_start_tvsec uint64 + Pbi_start_tvusec uint64 +} + +type procTaskInfo struct { + Virtual_size uint64 + Resident_size uint64 + Total_user uint64 + Total_system uint64 + Threads_user uint64 + Threads_system uint64 + Policy int32 + Faults int32 + Pageins int32 + Cow_faults int32 + Messages_sent int32 + Messages_received int32 + Syscalls_mach int32 + Syscalls_unix int32 + Csw int32 + Threadnum int32 + Numrunning int32 + Priority int32 +} + +type procTaskAllInfo struct { + Pbsd bsdInfo + Ptinfo procTaskInfo +} + +type vinfoStat struct { + Dev uint32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Atime int64 + Atimensec int64 + Mtime int64 + Mtimensec int64 + Ctime int64 + Ctimensec int64 + Birthtime int64 + Birthtimensec int64 + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Rdev uint32 + Qspare [2]int64 +} + +type fsid struct { + Val [2]int32 +} + +type vnodeInfo struct { + Stat vinfoStat + Type int32 + Pad int32 + Fsid fsid +} + +type vnodeInfoPath struct { + Vi vnodeInfo + Path [1024]int8 +} + +type procVnodePathInfo struct { + Cdir vnodeInfoPath + Rdir vnodeInfoPath +} + +type vmStatisticsData struct { + Free_count uint32 + Active_count uint32 + Inactive_count uint32 + Wire_count uint32 + Zero_fill_count uint32 + Reactivations uint32 + Pageins uint32 + Pageouts uint32 + Faults uint32 + Cow_faults uint32 + Lookups uint32 + Hits uint32 + Purgeable_count uint32 + Purges uint32 + Speculative_count uint32 +} + +type vmStatistics64Data struct { + Free_count uint32 + Active_count uint32 + Inactive_count uint32 + Wire_count uint32 + Zero_fill_count uint64 + Reactivations uint64 + Pageins uint64 + Pageouts uint64 + Faults uint64 + Cow_faults uint64 + Lookups uint64 + Hits uint64 + Purges uint64 + Purgeable_count uint32 + Speculative_count uint32 + Decompressions uint64 + Compressions uint64 + Swapins uint64 + Swapouts uint64 + Compressor_page_count uint32 + Throttled_count uint32 + External_page_count uint32 + Internal_page_count uint32 + Total_uncompressed_pages_in_compressor uint64 +} + +type vmSize uint64 + +const ( + cpuStateUser = 0x0 + cpuStateSystem = 0x1 + cpuStateIdle = 0x2 + cpuStateNice = 0x3 +) + +type hostCPULoadInfo struct { + Ticks [4]uint32 +} diff --git a/agent/pkg/go-sysinfo/providers/linux/arch_linux.go b/agent/pkg/go-sysinfo/providers/linux/arch_linux.go new file mode 100644 index 0000000..e1d2893 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/arch_linux.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "fmt" + "syscall" +) + +func Architecture() (string, error) { + var uname syscall.Utsname + if err := syscall.Uname(&uname); err != nil { + return "", fmt.Errorf("architecture: %w", err) + } + + data := make([]byte, 0, len(uname.Machine)) + for _, v := range uname.Machine { + if v == 0 { + break + } + data = append(data, byte(v)) + } + + return string(data), nil +} diff --git a/agent/pkg/go-sysinfo/providers/linux/boottime_linux.go b/agent/pkg/go-sysinfo/providers/linux/boottime_linux.go new file mode 100644 index 0000000..58665a7 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/boottime_linux.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "sync" + "time" + + "github.com/prometheus/procfs" +) + +var ( + bootTimeValue time.Time // Cached boot time. + bootTimeLock sync.Mutex // Lock that guards access to bootTime. +) + +func bootTime(fs procfs.FS) (time.Time, error) { + bootTimeLock.Lock() + defer bootTimeLock.Unlock() + + if !bootTimeValue.IsZero() { + return bootTimeValue, nil + } + + stat, err := fs.Stat() + if err != nil { + return time.Time{}, err + } + + bootTimeValue = time.Unix(int64(stat.BootTime), 0) + return bootTimeValue, nil +} diff --git a/agent/pkg/go-sysinfo/providers/linux/capabilities_linux.go b/agent/pkg/go-sysinfo/providers/linux/capabilities_linux.go new file mode 100644 index 0000000..7edf5a7 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/capabilities_linux.go @@ -0,0 +1,122 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
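bootTime above caches the boot time derived from /proc/stat via github.com/prometheus/procfs. A minimal uncached sketch of the same lookup (illustrative only, not part of the vendored code):

//go:build linux

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	stat, err := fs.Stat()
	if err != nil {
		panic(err)
	}

	boot := time.Unix(int64(stat.BootTime), 0)
	fmt.Println("booted at:", boot, "uptime:", time.Since(boot).Round(time.Second))
}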
+ +package linux + +import ( + "strconv" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +// capabilityNames is mapping of capability constant values to names. +// +// Generated with: +// +// curl -s https://raw.githubusercontent.com/torvalds/linux/master/include/uapi/linux/capability.h | \ +// grep -P '^#define CAP_\w+\s+\d+' | \ +// perl -pe 's/#define CAP_(\w+)\s+(\d+)/\2: "\L\1",/g' +var capabilityNames = map[int]string{ + 0: "chown", + 1: "dac_override", + 2: "dac_read_search", + 3: "fowner", + 4: "fsetid", + 5: "kill", + 6: "setgid", + 7: "setuid", + 8: "setpcap", + 9: "linux_immutable", + 10: "net_bind_service", + 11: "net_broadcast", + 12: "net_admin", + 13: "net_raw", + 14: "ipc_lock", + 15: "ipc_owner", + 16: "sys_module", + 17: "sys_rawio", + 18: "sys_chroot", + 19: "sys_ptrace", + 20: "sys_pacct", + 21: "sys_admin", + 22: "sys_boot", + 23: "sys_nice", + 24: "sys_resource", + 25: "sys_time", + 26: "sys_tty_config", + 27: "mknod", + 28: "lease", + 29: "audit_write", + 30: "audit_control", + 31: "setfcap", + 32: "mac_override", + 33: "mac_admin", + 34: "syslog", + 35: "wake_alarm", + 36: "block_suspend", + 37: "audit_read", + 38: "perfmon", + 39: "bpf", + 40: "checkpoint_restore", +} + +func capabilityName(num int) string { + name, found := capabilityNames[num] + if found { + return name + } + + return strconv.Itoa(num) +} + +func readCapabilities(content []byte) (*types.CapabilityInfo, error) { + var cap types.CapabilityInfo + + err := parseKeyValue(content, ':', func(key, value []byte) error { + var err error + switch string(key) { + case "CapInh": + cap.Inheritable, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + case "CapPrm": + cap.Permitted, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + case "CapEff": + cap.Effective, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + case "CapBnd": + cap.Bounding, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + case "CapAmb": + cap.Ambient, err = decodeBitMap(string(value), capabilityName) + if err != nil { + return err + } + } + return nil + }) + + return &cap, err +} diff --git a/agent/pkg/go-sysinfo/providers/linux/container.go b/agent/pkg/go-sysinfo/providers/linux/container.go new file mode 100644 index 0000000..7eee188 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/container.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" +) + +const procOneCgroup = "/proc/1/cgroup" + +// IsContainerized returns true if this process is containerized. 
+func IsContainerized() (bool, error) { + data, err := ioutil.ReadFile(procOneCgroup) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + + return false, fmt.Errorf("failed to read process cgroups: %w", err) + } + + return isContainerizedCgroup(data) +} + +func isContainerizedCgroup(data []byte) (bool, error) { + s := bufio.NewScanner(bytes.NewReader(data)) + for n := 0; s.Scan(); n++ { + line := s.Bytes() + + // Following a suggestion on Stack Overflow on how to detect + // being inside a container: https://stackoverflow.com/a/20012536/235203 + if bytes.Contains(line, []byte("docker")) || bytes.Contains(line, []byte(".slice")) || bytes.Contains(line, []byte("lxc")) || bytes.Contains(line, []byte("kubepods")) { + return true, nil + } + } + + return false, s.Err() +} diff --git a/agent/pkg/go-sysinfo/providers/linux/container_test.go b/agent/pkg/go-sysinfo/providers/linux/container_test.go new file mode 100644 index 0000000..b1d7658 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/container_test.go @@ -0,0 +1,151 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
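readCapabilities above parses the CapInh/CapPrm/CapEff/CapBnd/CapAmb hex bitmasks from /proc/[pid]/status and maps each set bit through the capabilityNames table. A standalone sketch of that decoding with a sample CapEff value and a trimmed-down name table (illustrative only, not part of the vendored code):

package main

import (
	"fmt"
	"strconv"
)

// A few entries from the capabilityNames table above; unknown bits fall back
// to their numeric value, just like capabilityName.
var capNames = map[int]string{0: "chown", 1: "dac_override", 5: "kill", 21: "sys_admin"}

func main() {
	// Sample CapEff value as it appears in /proc/self/status.
	const capEff = "00000000a80425fb"
	mask, err := strconv.ParseUint(capEff, 16, 64)
	if err != nil {
		panic(err)
	}

	var caps []string
	for i := 0; i < 64; i++ {
		if mask&(1<<uint(i)) == 0 {
			continue
		}
		name, ok := capNames[i]
		if !ok {
			name = strconv.Itoa(i)
		}
		caps = append(caps, name)
	}
	fmt.Println("effective capabilities:", caps)
}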
+ +package linux + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +const nonContainerizedCgroup = `11:freezer:/ +10:pids:/init.scope +9:memory:/init.scope +8:cpuset:/ +7:perf_event:/ +6:hugetlb:/ +5:blkio:/init.scope +4:net_cls,net_prio:/ +3:devices:/init.scope +2:cpu,cpuacct:/init.scope +1:name=systemd:/init.scope +` + +const containerCgroup = `14:name=systemd:/docker/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +13:pids:/docker/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +12:hugetlb:/docker/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +11:net_prio:/docker/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +10:perf_event:/docker/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +9:net_cls:/docker/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +8:freezer:/docker/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +7:devices:/docker/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +6:memory:/docker/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +5:blkio:/docker/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +4:cpuacct:/docker/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +3:cpu:/docker/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +2:cpuset:/docker/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +1:name=openrc:/docker +` + +const containerHostPIDNamespaceCgroup = `14:name=systemd:/ +13:pids:/ +12:hugetlb:/ +11:net_prio:/ +10:perf_event:/ +9:net_cls:/ +8:freezer:/ +7:devices:/ +6:memory:/ +5:blkio:/ +4:cpuacct:/ +3:cpu:/ +2:cpuset:/ +1:name=openrc:/ +` + +const lxcCgroup = `9:hugetlb:/lxc/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +8:perf_event:/lxc/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +7:blkio:/lxc/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +6:freezer:/lxc/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +5:devices:/lxc/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +4:memory:/lxc/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +3:cpuacct:/lxc/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +2:cpu:/lxc/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60 +1:cpuset:/lxc/81438f4655cd771c425607dcf7654f4dc03c073c0123edc45fcfad28132e8c60` + +const systemdCgroup = `12:hugetlb:/service.slice/podc1281d63_01ab_11ea_ba0a_3cfdfe55a1c0.slice/e2b68f8a6e227921b236c686a243e8ff50f561f493d401da7ac3f8cae28f08b1 +11:perf_event:/service.slice/podc1281d63_01ab_11ea_ba0a_3cfdfe55a1c0.slice/e2b68f8a6e227921b236c686a243e8ff50f561f493d401da7ac3f8cae28f08b1 +10:pids:/service.slice/podc1281d63_01ab_11ea_ba0a_3cfdfe55a1c0.slice/e2b68f8a6e227921b236c686a243e8ff50f561f493d401da7ac3f8cae28f08b1 +9:cpu,cpuacct:/service.slice/podc1281d63_01ab_11ea_ba0a_3cfdfe55a1c0.slice/e2b68f8a6e227921b236c686a243e8ff50f561f493d401da7ac3f8cae28f08b1 +8:cpuset:/service.slice/podc1281d63_01ab_11ea_ba0a_3cfdfe55a1c0.slice/e2b68f8a6e227921b236c686a243e8ff50f561f493d401da7ac3f8cae28f08b1 +7:memory:/service.slice/podc1281d63_01ab_11ea_ba0a_3cfdfe55a1c0.slice/e2b68f8a6e227921b236c686a243e8ff50f561f493d401da7ac3f8cae28f08b1 +6:freezer:/service.slice/podc1281d63_01ab_11ea_ba0a_3cfdfe55a1c0.slice/e2b68f8a6e227921b236c686a243e8ff50f561f493d401da7ac3f8cae28f08b1 +5:rdma:/ 
+4:net_cls,net_prio:/service.slice/podc1281d63_01ab_11ea_ba0a_3cfdfe55a1c0.slice/e2b68f8a6e227921b236c686a243e8ff50f561f493d401da7ac3f8cae28f08b1 +3:devices:/service.slice/podc1281d63_01ab_11ea_ba0a_3cfdfe55a1c0.slice/e2b68f8a6e227921b236c686a243e8ff50f561f493d401da7ac3f8cae28f08b1 +2:blkio:/service.slice/podc1281d63_01ab_11ea_ba0a_3cfdfe55a1c0.slice/e2b68f8a6e227921b236c686a243e8ff50f561f493d401da7ac3f8cae28f08b1 +1:name=systemd:/service.slice/podc1281d63_01ab_11ea_ba0a_3cfdfe55a1c0.slice/e2b68f8a6e227921b236c686a243e8ff50f561f493d401da7ac3f8cae28f08b1` + +const emptyCgroup = `` + +const kubernetesCgroup = `11:perf_event:/kubepods/burstable/podb83789a8-5f9d-11ea-bae1-0a0084deb344/9f99515d52142271cfeebef269bf4b7609b9b69b62008d6a5d316f561ccf061d +10:freezer:/kubepods/burstable/podb83789a8-5f9d-11ea-bae1-0a0084deb344/9f99515d52142271cfeebef269bf4b7609b9b69b62008d6a5d316f561ccf061d +9:hugetlb:/kubepods/burstable/podb83789a8-5f9d-11ea-bae1-0a0084deb344/9f99515d52142271cfeebef269bf4b7609b9b69b62008d6a5d316f561ccf061d +8:devices:/kubepods/burstable/podb83789a8-5f9d-11ea-bae1-0a0084deb344/9f99515d52142271cfeebef269bf4b7609b9b69b62008d6a5d316f561ccf061d +7:blkio:/kubepods/burstable/podb83789a8-5f9d-11ea-bae1-0a0084deb344/9f99515d52142271cfeebef269bf4b7609b9b69b62008d6a5d316f561ccf061d +6:cpuset:/kubepods/burstable/podb83789a8-5f9d-11ea-bae1-0a0084deb344/9f99515d52142271cfeebef269bf4b7609b9b69b62008d6a5d316f561ccf061d +5:cpu,cpuacct:/kubepods/burstable/podb83789a8-5f9d-11ea-bae1-0a0084deb344/9f99515d52142271cfeebef269bf4b7609b9b69b62008d6a5d316f561ccf061d +4:pids:/kubepods/burstable/podb83789a8-5f9d-11ea-bae1-0a0084deb344/9f99515d52142271cfeebef269bf4b7609b9b69b62008d6a5d316f561ccf061d +3:memory:/kubepods/burstable/podb83789a8-5f9d-11ea-bae1-0a0084deb344/9f99515d52142271cfeebef269bf4b7609b9b69b62008d6a5d316f561ccf061d +2:net_cls,net_prio:/kubepods/burstable/podb83789a8-5f9d-11ea-bae1-0a0084deb344/9f99515d52142271cfeebef269bf4b7609b9b69b62008d6a5d316f561ccf061d +1:name=systemd:/kubepods/burstable/podb83789a8-5f9d-11ea-bae1-0a0084deb344/9f99515d52142271cfeebef269bf4b7609b9b69b62008d6a5d316f561ccf061d +` + +func TestIsContainerized(t *testing.T) { + tests := []struct { + cgroupStr string + containerized bool + }{ + { + cgroupStr: nonContainerizedCgroup, + containerized: false, + }, + { + cgroupStr: containerCgroup, + containerized: true, + }, + { + cgroupStr: containerHostPIDNamespaceCgroup, + containerized: false, + }, + { + cgroupStr: lxcCgroup, + containerized: true, + }, + { + cgroupStr: systemdCgroup, + containerized: true, + }, + { + cgroupStr: emptyCgroup, + containerized: false, + }, + { + cgroupStr: kubernetesCgroup, + containerized: true, + }, + } + + for _, test := range tests { + containerized, err := isContainerizedCgroup([]byte(test.cgroupStr)) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, test.containerized, containerized) + } +} diff --git a/agent/pkg/go-sysinfo/providers/linux/doc.go b/agent/pkg/go-sysinfo/providers/linux/doc.go new file mode 100644 index 0000000..53d3c36 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package linux implements the HostProvider and ProcessProvider interfaces +// for providing information about Linux. +package linux diff --git a/agent/pkg/go-sysinfo/providers/linux/host_fqdn_integration_docker_linux_test.go b/agent/pkg/go-sysinfo/providers/linux/host_fqdn_integration_docker_linux_test.go new file mode 100644 index 0000000..cbb25c9 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/host_fqdn_integration_docker_linux_test.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build integration && docker + +package linux + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestHost_FQDN_set(t *testing.T) { + host, err := newLinuxSystem("").Host() + if err != nil { + t.Fatal(fmt.Errorf("could not get host information: %w", err)) + } + + gotFQDN, err := host.FQDN() + require.NoError(t, err) + if gotFQDN != wantFQDN { + t.Errorf("got FQDN %q, want: %q", gotFQDN, wantFQDN) + } +} + +func TestHost_FQDN_not_set(t *testing.T) { + host, err := newLinuxSystem("").Host() + if err != nil { + t.Fatal(fmt.Errorf("could not get host information: %w", err)) + } + + gotFQDN, err := host.FQDN() + require.NoError(t, err) + hostname := host.Info().Hostname + if gotFQDN != hostname { + t.Errorf("name and FQDN should be the same but hostname: %s, FQDN %s", hostname, gotFQDN) + } +} diff --git a/agent/pkg/go-sysinfo/providers/linux/host_fqdn_integration_linux_test.go b/agent/pkg/go-sysinfo/providers/linux/host_fqdn_integration_linux_test.go new file mode 100644 index 0000000..2e226fe --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/host_fqdn_integration_linux_test.go @@ -0,0 +1,183 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build integration + +package linux + +import ( + "context" + "fmt" + "io" + "os" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" +) + +const ( + wantHostname = "hostname" + wantDomain = "some.domain" + wantFQDN = wantHostname + "." + wantDomain +) + +func TestHost_FQDN(t *testing.T) { + const envKey = "GO_VERSION" + goversion, ok := os.LookupEnv(envKey) + if !ok { + t.Fatalf("environment variable %s not set, please set a Go version", + envKey) + } + image := "golang:" + goversion + + tcs := []struct { + name string + cf container.Config + }{ + { + name: "TestHost_FQDN_set_hostname+domainname", + cf: container.Config{ + Hostname: wantHostname, + Domainname: wantDomain, + AttachStderr: testing.Verbose(), + AttachStdout: testing.Verbose(), + WorkingDir: "/usr/src/elastic/go-sysinfo", + Image: image, + Cmd: []string{ + "go", "test", "-v", + "-tags", "integration,docker", + "-run", "^TestHost_FQDN_set$", + "./providers/linux", + }, + Tty: false, + }, + }, + { + name: "TestHost_FQDN_set_hostname_only", + cf: container.Config{ + Hostname: wantFQDN, + AttachStderr: testing.Verbose(), + AttachStdout: testing.Verbose(), + WorkingDir: "/usr/src/elastic/go-sysinfo", + Image: image, + Cmd: []string{ + "go", "test", "-v", + "-tags", "integration,docker", + "-run", "^TestHost_FQDN_set$", + "./providers/linux", + }, + Tty: false, + }, + }, + { + name: "TestHost_FQDN_not_set", + cf: container.Config{ + AttachStderr: testing.Verbose(), + AttachStdout: testing.Verbose(), + WorkingDir: "/usr/src/elastic/go-sysinfo", + Image: image, + Cmd: []string{ + "go", "test", "-v", "-count", "1", + "-tags", "integration,docker", + "-run", "^TestHost_FQDN_not_set$", + "./providers/linux", + }, + Tty: false, + }, + }, + } + + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + t.Fatalf("failed to create docker client: %v", err) + } + defer cli.Close() + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + runOnDocker(t, cli, &tc.cf) + }) + } +} + +func runOnDocker(t *testing.T, cli *client.Client, cf *container.Config) { + ctx := context.Background() + + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("could not get current directory: %v", err) + } + wd := pwd + "../../../" + + reader, err := cli.ImagePull(ctx, cf.Image, types.ImagePullOptions{}) + if err != nil { + t.Fatalf("failed to pull image %s: %v", cf.Image, err) + } + defer reader.Close() + io.Copy(os.Stderr, reader) + + resp, err := cli.ContainerCreate(ctx, cf, &container.HostConfig{ + AutoRemove: false, + Binds: []string{wd + ":/usr/src/elastic/go-sysinfo"}, + }, nil, nil, "") + if err != nil { + t.Fatalf("could not create docker conteiner: %v", err) + } + defer func() { + err = cli.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{ + Force: true, RemoveVolumes: true, + }) + if err != nil { + t.Logf("WARNING: could not remove docker container: %v", err) + } + }() + + if err := cli.ContainerStart(ctx, resp.ID, 
types.ContainerStartOptions{}); err != nil { + t.Fatalf("could not start docker container: %v", err) + } + + statusCh, errCh := cli.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning) + select { + case err := <-errCh: + if err != nil { + // Not using fatal as we might be able to recover the container + // logs. + t.Errorf("docker ContainerWait failed: %v", err) + } + case s := <-statusCh: + if s.StatusCode != 0 { + msg := fmt.Sprintf("container exited with status code %d", s.StatusCode) + if s.Error != nil { + msg = fmt.Sprintf("%s: error: %s", msg, s.Error.Message) + } + + // Not using fatal as we might be able to recover the container + // logs. + t.Errorf(msg) + } + } + + out, err := cli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStderr: true, ShowStdout: true}) + if err != nil { + t.Fatalf("could not get container logs: %v", err) + } + + stdcopy.StdCopy(os.Stdout, os.Stderr, out) +} diff --git a/agent/pkg/go-sysinfo/providers/linux/host_linux.go b/agent/pkg/go-sysinfo/providers/linux/host_linux.go new file mode 100644 index 0000000..41f446b --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/host_linux.go @@ -0,0 +1,264 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/joeshaw/multierror" + "github.com/prometheus/procfs" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/internal/registry" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/providers/shared" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +func init() { + registry.Register(newLinuxSystem("")) +} + +type linuxSystem struct { + procFS procFS +} + +func newLinuxSystem(hostFS string) linuxSystem { + mountPoint := filepath.Join(hostFS, procfs.DefaultMountPoint) + fs, _ := procfs.NewFS(mountPoint) + return linuxSystem{ + procFS: procFS{FS: fs, mountPoint: mountPoint}, + } +} + +func (s linuxSystem) Host() (types.Host, error) { + return newHost(s.procFS) +} + +type host struct { + procFS procFS + stat procfs.Stat + info types.HostInfo +} + +func (h *host) Info() types.HostInfo { + return h.info +} + +func (h *host) Memory() (*types.HostMemoryInfo, error) { + content, err := ioutil.ReadFile(h.procFS.path("meminfo")) + if err != nil { + return nil, err + } + + return parseMemInfo(content) +} + +func (h *host) FQDN() (string, error) { + return shared.FQDN() +} + +// VMStat reports data from /proc/vmstat on linux. +func (h *host) VMStat() (*types.VMStatInfo, error) { + content, err := ioutil.ReadFile(h.procFS.path("vmstat")) + if err != nil { + return nil, err + } + + return parseVMStat(content) +} + +// LoadAverage reports data from /proc/loadavg on linux. 
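+// The three values are the kernel's 1, 5 and 15 minute load averages from
+// /proc/loadavg, mapped onto the One, Five and Fifteen fields below.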
+func (h *host) LoadAverage() (*types.LoadAverageInfo, error) { + loadAvg, err := h.procFS.LoadAvg() + if err != nil { + return nil, err + } + + return &types.LoadAverageInfo{ + One: loadAvg.Load1, + Five: loadAvg.Load5, + Fifteen: loadAvg.Load15, + }, nil +} + +// NetworkCounters reports data from /proc/net on linux +func (h *host) NetworkCounters() (*types.NetworkCountersInfo, error) { + snmpRaw, err := ioutil.ReadFile(h.procFS.path("net/snmp")) + if err != nil { + return nil, err + } + snmp, err := getNetSnmpStats(snmpRaw) + if err != nil { + return nil, err + } + + netstatRaw, err := ioutil.ReadFile(h.procFS.path("net/netstat")) + if err != nil { + return nil, err + } + netstat, err := getNetstatStats(netstatRaw) + if err != nil { + return nil, err + } + + return &types.NetworkCountersInfo{SNMP: snmp, Netstat: netstat}, nil +} + +func (h *host) CPUTime() (types.CPUTimes, error) { + stat, err := h.procFS.Stat() + if err != nil { + return types.CPUTimes{}, err + } + + return types.CPUTimes{ + User: time.Duration(stat.CPUTotal.User * float64(time.Second)), + System: time.Duration(stat.CPUTotal.System * float64(time.Second)), + Idle: time.Duration(stat.CPUTotal.Idle * float64(time.Second)), + IOWait: time.Duration(stat.CPUTotal.Iowait * float64(time.Second)), + IRQ: time.Duration(stat.CPUTotal.IRQ * float64(time.Second)), + Nice: time.Duration(stat.CPUTotal.Nice * float64(time.Second)), + SoftIRQ: time.Duration(stat.CPUTotal.SoftIRQ * float64(time.Second)), + Steal: time.Duration(stat.CPUTotal.Steal * float64(time.Second)), + }, nil +} + +func newHost(fs procFS) (*host, error) { + stat, err := fs.Stat() + if err != nil { + return nil, fmt.Errorf("failed to read proc stat: %w", err) + } + + h := &host{stat: stat, procFS: fs} + r := &reader{} + r.architecture(h) + r.bootTime(h) + r.containerized(h) + r.hostname(h) + r.network(h) + r.kernelVersion(h) + r.os(h) + r.time(h) + r.uniqueID(h) + + return h, r.Err() +} + +type reader struct { + errs []error +} + +func (r *reader) addErr(err error) bool { + if err != nil { + if !errors.Is(err, types.ErrNotImplemented) { + r.errs = append(r.errs, err) + } + return true + } + return false +} + +func (r *reader) Err() error { + if len(r.errs) > 0 { + return &multierror.MultiError{Errors: r.errs} + } + return nil +} + +func (r *reader) architecture(h *host) { + v, err := Architecture() + if r.addErr(err) { + return + } + h.info.Architecture = v +} + +func (r *reader) bootTime(h *host) { + v, err := bootTime(h.procFS.FS) + if r.addErr(err) { + return + } + h.info.BootTime = v +} + +func (r *reader) containerized(h *host) { + v, err := IsContainerized() + if r.addErr(err) { + return + } + h.info.Containerized = &v +} + +func (r *reader) hostname(h *host) { + v, err := os.Hostname() + if r.addErr(err) { + return + } + h.info.Hostname = strings.ToLower(v) +} + +func (r *reader) network(h *host) { + ips, macs, err := shared.Network() + if r.addErr(err) { + return + } + h.info.IPs = ips + h.info.MACs = macs +} + +func (r *reader) kernelVersion(h *host) { + v, err := KernelVersion() + if r.addErr(err) { + return + } + h.info.KernelVersion = v +} + +func (r *reader) os(h *host) { + v, err := OperatingSystem() + if r.addErr(err) { + return + } + h.info.OS = v +} + +func (r *reader) time(h *host) { + h.info.Timezone, h.info.TimezoneOffsetSec = time.Now().Zone() +} + +func (r *reader) uniqueID(h *host) { + v, err := MachineID() + if r.addErr(err) { + return + } + h.info.UniqueID = v +} + +type procFS struct { + procfs.FS + mountPoint string +} + +func (fs *procFS) 
path(p ...string) string { + elem := append([]string{fs.mountPoint}, p...) + return filepath.Join(elem...) +} diff --git a/agent/pkg/go-sysinfo/providers/linux/host_linux_test.go b/agent/pkg/go-sysinfo/providers/linux/host_linux_test.go new file mode 100644 index 0000000..12f79e8 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/host_linux_test.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/internal/registry" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +var _ registry.HostProvider = linuxSystem{} + +func TestHost(t *testing.T) { + host, err := newLinuxSystem("").Host() + if err != nil { + t.Logf("could not get all host info: %v\n", err) + } + + info := host.Info() + data, _ := json.MarshalIndent(info, "", " ") + t.Logf(string(data)) +} + +func TestHostMemoryInfo(t *testing.T) { + host, err := newLinuxSystem("testdata/ubuntu1710").Host() + if err != nil { + t.Fatal(err) + } + m, err := host.Memory() + if err != nil { + t.Fatal(err) + } + + assert.EqualValues(t, 4139057152, m.Total) + assert.NotContains(t, m.Metrics, "MemTotal") + assert.Contains(t, m.Metrics, "Slab") +} + +func TestHostVMStat(t *testing.T) { + host, err := newLinuxSystem("testdata/ubuntu1710").Host() + if err != nil { + t.Fatal(err) + } + s, err := host.(types.VMStat).VMStat() + if err != nil { + t.Fatal(err) + } + + data, err := json.MarshalIndent(s, "", " ") + if err != nil { + t.Fatal(err) + } + t.Log(string(data)) +} + +func TestHostLoadAverage(t *testing.T) { + host, err := newLinuxSystem("testdata/ubuntu1710").Host() + if err != nil { + t.Fatal(err) + } + s, err := host.(types.LoadAverage).LoadAverage() + if err != nil { + t.Fatal(err) + } + + data, err := json.Marshal(s) + if err != nil { + t.Fatal(err) + } + t.Log(string(data)) +} + +func TestHostNetworkCounters(t *testing.T) { + host, err := newLinuxSystem("testdata/fedora30").Host() + if err != nil { + t.Fatal(err) + } + + s, err := host.(types.NetworkCounters).NetworkCounters() + if err != nil { + t.Fatal(err) + } + + assert.NotEmpty(t, s.Netstat.IPExt) + assert.NotEmpty(t, s.Netstat.TCPExt) + assert.NotEmpty(t, s.SNMP.IP) + + data, err := json.MarshalIndent(s, "", " ") + if err != nil { + t.Fatal(err) + } + t.Log(string(data)) +} diff --git a/agent/pkg/go-sysinfo/providers/linux/kernel_linux.go b/agent/pkg/go-sysinfo/providers/linux/kernel_linux.go new file mode 100644 index 0000000..1695fb8 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/kernel_linux.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "fmt" + "syscall" +) + +func KernelVersion() (string, error) { + var uname syscall.Utsname + if err := syscall.Uname(&uname); err != nil { + return "", fmt.Errorf("kernel version: %w", err) + } + + data := make([]byte, 0, len(uname.Release)) + for _, v := range uname.Release { + if v == 0 { + break + } + data = append(data, byte(v)) + } + + return string(data), nil +} diff --git a/agent/pkg/go-sysinfo/providers/linux/machineid.go b/agent/pkg/go-sysinfo/providers/linux/machineid.go new file mode 100644 index 0000000..f97d3e0 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/machineid.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +// Possible (current and historic) locations of the machine-id file. +// These will be searched in order. +var machineIDFiles = []string{"/etc/machine-id", "/var/lib/dbus/machine-id", "/var/db/dbus/machine-id"} + +func MachineID() (string, error) { + var contents []byte + var err error + + for _, file := range machineIDFiles { + contents, err = ioutil.ReadFile(file) + if err != nil { + if os.IsNotExist(err) { + // Try next location + continue + } + + // Return with error on any other error + return "", fmt.Errorf("failed to read %v: %w", file, err) + } + + // Found it + break + } + + if os.IsNotExist(err) { + // None of the locations existed + return "", types.ErrNotImplemented + } + + contents = bytes.TrimSpace(contents) + return string(contents), nil +} diff --git a/agent/pkg/go-sysinfo/providers/linux/memory_linux.go b/agent/pkg/go-sysinfo/providers/linux/memory_linux.go new file mode 100644 index 0000000..ea5d0fd --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/memory_linux.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package linux
+
+import (
+	"fmt"
+
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types"
+)
+
+func parseMemInfo(content []byte) (*types.HostMemoryInfo, error) {
+	memInfo := &types.HostMemoryInfo{
+		Metrics: map[string]uint64{},
+	}
+
+	hasAvailable := false
+	err := parseKeyValue(content, ':', func(key, value []byte) error {
+		num, err := parseBytesOrNumber(value)
+		if err != nil {
+			return fmt.Errorf("failed to parse %v value of %v: %w", string(key), string(value), err)
+		}
+
+		k := string(key)
+		switch k {
+		case "MemTotal":
+			memInfo.Total = num
+		case "MemAvailable":
+			hasAvailable = true
+			memInfo.Available = num
+		case "MemFree":
+			memInfo.Free = num
+		case "SwapTotal":
+			memInfo.VirtualTotal = num
+		case "SwapFree":
+			memInfo.VirtualFree = num
+		default:
+			memInfo.Metrics[k] = num
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	memInfo.Used = memInfo.Total - memInfo.Free
+	memInfo.VirtualUsed = memInfo.VirtualTotal - memInfo.VirtualFree
+
+	// MemAvailable was added in kernel 3.14.
+	if !hasAvailable {
+		// Linux uses this for the calculation (but we are using a simpler calculation).
+		// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
+		memInfo.Available = memInfo.Free + memInfo.Metrics["Buffers"] + memInfo.Metrics["Cached"]
+	}
+
+	return memInfo, nil
+}
diff --git a/agent/pkg/go-sysinfo/providers/linux/os.go b/agent/pkg/go-sysinfo/providers/linux/os.go
new file mode 100644
index 0000000..91b92a6
--- /dev/null
+++ b/agent/pkg/go-sysinfo/providers/linux/os.go
@@ -0,0 +1,318 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package linux
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/joeshaw/multierror"
+
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types"
+)
+
+const (
+	osRelease      = "/etc/os-release"
+	lsbRelease     = "/etc/lsb-release"
+	distribRelease = "/etc/*-release"
+	versionGrok    = `(?P<version>(?P<major>[0-9]+)\.?(?P<minor>[0-9]+)?\.?(?P<patch>\w+)?)(?: \((?P<codename>[-\w ]+)\))?`
+)
+
+var (
+	// distribReleaseRegexp parses the /etc/<distrib>-release file. See man lsb-release.
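+	// For example (illustrative), the centos6 test fixture line
+	// "CentOS release 6.9 (Final)" yields name=CentOS, major=6, minor=9 and
+	// codename=Final when matched against this expression.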
+	distribReleaseRegexp = regexp.MustCompile(`(?P<name>[\w]+).* ` + versionGrok)
+
+	// versionRegexp parses version numbers (e.g. 6 or 6.1 or 6.1.0 or 6.1.0_20150102).
+	versionRegexp = regexp.MustCompile(versionGrok)
+)
+
+// familyMap contains a mapping of family -> []platforms.
+var familyMap = map[string][]string{
+	"arch": {"arch", "antergos", "manjaro"},
+	"redhat": {
+		"redhat", "fedora", "centos", "scientific", "oraclelinux", "ol",
+		"amzn", "rhel", "almalinux", "openeuler", "rocky",
+	},
+	"debian": {"debian", "ubuntu", "raspbian", "linuxmint"},
+	"suse":   {"suse", "sles", "opensuse"},
+}
+
+var platformToFamilyMap map[string]string
+
+func init() {
+	platformToFamilyMap = map[string]string{}
+	for family, platformList := range familyMap {
+		for _, platform := range platformList {
+			platformToFamilyMap[platform] = family
+		}
+	}
+}
+
+func OperatingSystem() (*types.OSInfo, error) {
+	return getOSInfo("")
+}
+
+func getOSInfo(baseDir string) (*types.OSInfo, error) {
+	osInfo, err := getOSRelease(baseDir)
+	if err != nil {
+		// Fallback
+		return findDistribRelease(baseDir)
+	}
+
+	// For the redhat family, enrich version info with data from
+	// /etc/[distrib]-release because the minor and patch info isn't always
+	// present in os-release.
+	if osInfo.Family != "redhat" {
+		return osInfo, nil
+	}
+
+	distInfo, err := findDistribRelease(baseDir)
+	if err != nil {
+		return osInfo, err
+	}
+	osInfo.Major = distInfo.Major
+	osInfo.Minor = distInfo.Minor
+	osInfo.Patch = distInfo.Patch
+	osInfo.Codename = distInfo.Codename
+	return osInfo, nil
+}
+
+func getOSRelease(baseDir string) (*types.OSInfo, error) {
+	lsbRel, _ := os.ReadFile(filepath.Join(baseDir, lsbRelease))
+
+	osRel, err := os.ReadFile(filepath.Join(baseDir, osRelease))
+	if err != nil {
+		return nil, err
+	}
+	if len(osRel) == 0 {
+		return nil, fmt.Errorf("%v is empty: %w", osRelease, err)
+	}
+
+	return parseOSRelease(append(lsbRel, osRel...))
+}
+
+func parseOSRelease(content []byte) (*types.OSInfo, error) {
+	fields := map[string]string{}
+
+	s := bufio.NewScanner(bytes.NewReader(content))
+	for s.Scan() {
+		line := bytes.TrimSpace(s.Bytes())
+
+		// Skip blank lines and comments.
+		if len(line) == 0 || bytes.HasPrefix(line, []byte("#")) {
+			continue
+		}
+
+		parts := bytes.SplitN(s.Bytes(), []byte("="), 2)
+		if len(parts) != 2 {
+			continue
+		}
+
+		key := string(bytes.TrimSpace(parts[0]))
+		val := string(bytes.TrimSpace(parts[1]))
+		fields[key] = val
+
+		// Trim quotes.
+		val, err := strconv.Unquote(val)
+		if err == nil {
+			fields[key] = strings.TrimSpace(val)
+		}
+	}
+
+	if s.Err() != nil {
+		return nil, s.Err()
+	}
+
+	return makeOSInfo(fields)
+}
+
+func makeOSInfo(osRelease map[string]string) (*types.OSInfo, error) {
+	os := &types.OSInfo{
+		Type:     "linux",
+		Platform: firstOf(osRelease, "ID", "DISTRIB_ID"),
+		Name:     firstOf(osRelease, "NAME", "PRETTY_NAME"),
+		Version:  firstOf(osRelease, "VERSION", "VERSION_ID", "DISTRIB_RELEASE"),
+		Build:    osRelease["BUILD_ID"],
+		Codename: firstOf(osRelease, "VERSION_CODENAME", "DISTRIB_CODENAME"),
+	}
+
+	if os.Codename == "" {
+		// Some OSes use their own CODENAME keys (e.g. UBUNTU_CODENAME).
+		for k, v := range osRelease {
+			if strings.Contains(k, "CODENAME") {
+				os.Codename = v
+				break
+			}
+		}
+	}
+
+	if os.Platform == "" {
+		// Fallback to the first word of the Name field.
+		os.Platform, _, _ = strings.Cut(os.Name, " ")
+	}
+
+	os.Family = linuxFamily(os.Platform)
+	if os.Family == "" {
+		// ID_LIKE is a space-separated list of OS identifiers that this
+		// OS is similar to.
Use this to figure out the Linux family. + for _, id := range strings.Fields(osRelease["ID_LIKE"]) { + os.Family = linuxFamily(id) + if os.Family != "" { + break + } + } + } + + if os.Version != "" { + // Try parsing info from the version. + keys := versionRegexp.SubexpNames() + for i, m := range versionRegexp.FindStringSubmatch(os.Version) { + switch keys[i] { + case "major": + os.Major, _ = strconv.Atoi(m) + case "minor": + os.Minor, _ = strconv.Atoi(m) + case "patch": + os.Patch, _ = strconv.Atoi(m) + case "codename": + if os.Codename == "" { + os.Codename = m + } + } + } + } + + return os, nil +} + +func findDistribRelease(baseDir string) (*types.OSInfo, error) { + var errs []error + matches, err := filepath.Glob(filepath.Join(baseDir, distribRelease)) + if err != nil { + return nil, err + } + for _, path := range matches { + if strings.HasSuffix(path, osRelease) || strings.HasSuffix(path, lsbRelease) { + continue + } + + info, err := os.Stat(path) + if err != nil || info.IsDir() || info.Size() == 0 { + continue + } + + osInfo, err := getDistribRelease(path) + if err != nil { + errs = append(errs, fmt.Errorf("in %s: %w", path, err)) + continue + } + return osInfo, err + } + return nil, fmt.Errorf("no valid /etc/-release file found: %w", &multierror.MultiError{Errors: errs}) +} + +func getDistribRelease(file string) (*types.OSInfo, error) { + data, err := os.ReadFile(file) + if err != nil { + return nil, err + } + parts := bytes.SplitN(data, []byte("\n"), 2) + if len(parts) != 2 { + return nil, fmt.Errorf("failed to parse %v", file) + } + + // Use distrib as platform name. + var platform string + if parts := strings.SplitN(filepath.Base(file), "-", 2); len(parts) > 0 { + platform = strings.ToLower(parts[0]) + } + + return parseDistribRelease(platform, parts[0]) +} + +func parseDistribRelease(platform string, content []byte) (*types.OSInfo, error) { + var ( + line = string(bytes.TrimSpace(content)) + keys = distribReleaseRegexp.SubexpNames() + os = &types.OSInfo{ + Type: "linux", + Platform: platform, + } + ) + + for i, m := range distribReleaseRegexp.FindStringSubmatch(line) { + switch keys[i] { + case "name": + os.Name = m + case "version": + os.Version = m + case "major": + os.Major, _ = strconv.Atoi(m) + case "minor": + os.Minor, _ = strconv.Atoi(m) + case "patch": + os.Patch, _ = strconv.Atoi(m) + case "codename": + os.Version += " (" + m + ")" + os.Codename = m + } + } + + os.Family = linuxFamily(os.Platform) + return os, nil +} + +// firstOf returns the first non-empty value found in the map while +// iterating over keys. +func firstOf(kv map[string]string, keys ...string) string { + for _, key := range keys { + if v := kv[key]; v != "" { + return v + } + } + return "" +} + +// linuxFamily returns the linux distribution family associated to the OS platform. +// If there is no family associated then it returns an empty string. +func linuxFamily(platform string) string { + if platform == "" { + return "" + } + + platform = strings.ToLower(platform) + + // First try a direct lookup. + if family, found := platformToFamilyMap[platform]; found { + return family + } + + // Try prefix matching (e.g. opensuse matches opensuse-tumpleweed). 
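+	// For example, "opensuse-leap" has no exact entry in platformToFamilyMap,
+	// but it carries the "opensuse" prefix and therefore resolves to the
+	// "suse" family (see the opensuse-leap15.4 case in os_test.go).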
+ for platformPrefix, family := range platformToFamilyMap { + if strings.HasPrefix(platform, platformPrefix) { + return family + } + } + return "" +} diff --git a/agent/pkg/go-sysinfo/providers/linux/os_test.go b/agent/pkg/go-sysinfo/providers/linux/os_test.go new file mode 100644 index 0000000..3075c96 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/os_test.go @@ -0,0 +1,420 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build !windows + +package linux + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +func TestOperatingSystem(t *testing.T) { + t.Run("almalinux9", func(t *testing.T) { + // Data from 'docker pull almalinux:9'. + os, err := getOSInfo("testdata/almalinux9") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "redhat", + Platform: "almalinux", + Name: "AlmaLinux", + Version: "9.1 (Lime Lynx)", + Major: 9, + Minor: 1, + Codename: "Lime Lynx", + }, *os) + t.Logf("%#v", os) + }) + t.Run("alpine3.17", func(t *testing.T) { + // Data from 'docker pull alpine:3.17.3'. 
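+		// Alpine's os-release has no ID_LIKE entry and "alpine" is not in
+		// familyMap, so no Family value is expected for this fixture.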
+ os, err := getOSInfo("testdata/alpine3.17") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Platform: "alpine", + Name: "Alpine Linux", + Version: "3.17.3", + Major: 3, + Minor: 17, + Patch: 3, + }, *os) + t.Logf("%#v", os) + }) + t.Run("amazon2017.03", func(t *testing.T) { + os, err := getOSInfo("testdata/amazon2017.03") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "redhat", + Platform: "amzn", + Name: "Amazon Linux AMI", + Version: "2017.03", + Major: 2017, + Minor: 3, + Patch: 0, + }, *os) + t.Logf("%#v", os) + }) + t.Run("archlinux", func(t *testing.T) { + os, err := getOSInfo("testdata/archlinux") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "arch", + Platform: "archarm", + Name: "Arch Linux ARM", + Build: "rolling", + }, *os) + t.Logf("%#v", os) + }) + t.Run("centos6", func(t *testing.T) { + os, err := getOSInfo("testdata/centos6") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "redhat", + Platform: "centos", + Name: "CentOS", + Version: "6.9 (Final)", + Major: 6, + Minor: 9, + Codename: "Final", + }, *os) + t.Logf("%#v", os) + }) + t.Run("centos7", func(t *testing.T) { + os, err := getOSInfo("testdata/centos7") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "redhat", + Platform: "centos", + Name: "CentOS Linux", + Version: "7 (Core)", + Major: 7, + Minor: 4, + Patch: 1708, + Codename: "Core", + }, *os) + t.Logf("%#v", os) + }) + t.Run("centos7.8", func(t *testing.T) { + os, err := getOSInfo("testdata/centos7.8") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "redhat", + Platform: "centos", + Name: "CentOS Linux", + Version: "7 (Core)", + Major: 7, + Minor: 8, + Patch: 2003, + Codename: "Core", + }, *os) + t.Logf("%#v", os) + }) + t.Run("debian9", func(t *testing.T) { + os, err := getOSInfo("testdata/debian9") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "debian", + Platform: "debian", + Name: "Debian GNU/Linux", + Version: "9 (stretch)", + Major: 9, + Codename: "stretch", + }, *os) + t.Logf("%#v", os) + }) + t.Run("raspbian9", func(t *testing.T) { + os, err := getOSInfo("testdata/raspbian9") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "debian", + Platform: "raspbian", + Name: "Raspbian GNU/Linux", + Version: "9 (stretch)", + Major: 9, + Codename: "stretch", + }, *os) + t.Logf("%#v", os) + }) + t.Run("linuxmint20", func(t *testing.T) { + os, err := getOSInfo("testdata/linuxmint20") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "debian", + Platform: "linuxmint", + Name: "Linux Mint", + Version: "20 (Ulyana)", + Major: 20, + Codename: "ulyana", + }, *os) + t.Logf("%#v", os) + }) + t.Run("manjaro23", func(t *testing.T) { + os, err := getOSInfo("testdata/manjaro23") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "arch", + Platform: "manjaro-arm", + Name: "Manjaro ARM", + Version: "23.02", + Major: 23, + Minor: 2, + }, *os) + t.Logf("%#v", os) + }) + t.Run("redhat7", func(t *testing.T) { + os, err := getOSInfo("testdata/redhat7") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "redhat", + Platform: "rhel", + Name: "Red Hat Enterprise Linux Server", + 
Version: "7.6 (Maipo)", + Major: 7, + Minor: 6, + Codename: "Maipo", + }, *os) + t.Logf("%#v", os) + }) + t.Run("redhat9", func(t *testing.T) { + // Data from 'docker pull redhat/ubi9:9.0.0-1468'. + os, err := getOSInfo("testdata/redhat9") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "redhat", + Platform: "rhel", + Name: "Red Hat Enterprise Linux", + Version: "9.0 (Plow)", + Major: 9, + Minor: 0, + Codename: "Plow", + }, *os) + t.Logf("%#v", os) + }) + t.Run("rockylinux9", func(t *testing.T) { + // Data from 'docker pull rockylinux:9.0'. + os, err := getOSInfo("testdata/rockylinux9") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "redhat", + Platform: "rocky", + Name: "Rocky Linux", + Version: "9.0 (Blue Onyx)", + Major: 9, + Minor: 0, + Codename: "Blue Onyx", + }, *os) + t.Logf("%#v", os) + }) + t.Run("openeuler20.03", func(t *testing.T) { + os, err := getOSInfo("testdata/openeuler20.03") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "redhat", + Platform: "openEuler", + Name: "openEuler", + Version: "20.03 (LTS-SP3)", + Major: 20, + Minor: 3, + Codename: "LTS-SP3", + }, *os) + t.Logf("%#v", os) + }) + t.Run("opensuse-leap15.4", func(t *testing.T) { + os, err := getOSInfo("testdata/opensuse-leap15.4") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "suse", + Platform: "opensuse-leap", + Name: "openSUSE Leap", + Version: "15.4", + Major: 15, + Minor: 4, + }, *os) + t.Logf("%#v", os) + }) + t.Run("opensuse-tumbleweed", func(t *testing.T) { + os, err := getOSInfo("testdata/opensuse-tumbleweed") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "suse", + Platform: "opensuse-tumbleweed", + Name: "openSUSE Tumbleweed", + Version: "20230108", + Major: 20230108, + }, *os) + t.Logf("%#v", os) + }) + t.Run("oraclelinux7", func(t *testing.T) { + os, err := getOSInfo("testdata/oraclelinux7") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "redhat", + Platform: "ol", + Name: "Oracle Linux Server", + Version: "7.9", + Major: 7, + Minor: 9, + }, *os) + t.Logf("%#v", os) + }) + t.Run("ubuntu1404", func(t *testing.T) { + os, err := getOSInfo("testdata/ubuntu1404") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "debian", + Platform: "ubuntu", + Name: "Ubuntu", + Version: "14.04.5 LTS, Trusty Tahr", + Major: 14, + Minor: 4, + Patch: 5, + Codename: "trusty", + }, *os) + t.Logf("%#v", os) + }) + t.Run("ubuntu1710", func(t *testing.T) { + os, err := getOSInfo("testdata/ubuntu1710") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "debian", + Platform: "ubuntu", + Name: "Ubuntu", + Version: "17.10 (Artful Aardvark)", + Major: 17, + Minor: 10, + Patch: 0, + Codename: "artful", + }, *os) + t.Logf("%#v", os) + }) + t.Run("ubuntu2204", func(t *testing.T) { + os, err := getOSInfo("testdata/ubuntu2204") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "debian", + Platform: "ubuntu", + Name: "Ubuntu", + Version: "22.04 LTS (Jammy Jellyfish)", + Major: 22, + Minor: 4, + Codename: "jammy", + }, *os) + t.Logf("%#v", os) + }) + t.Run("fedora30", func(t *testing.T) { + os, err := getOSInfo("testdata/fedora30") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + 
Family: "redhat", + Platform: "fedora", + Name: "Fedora", + Version: "30 (Container Image)", + Major: 30, + Minor: 0, + Patch: 0, + Codename: "Thirty", + }, *os) + t.Logf("%#v", os) + }) + t.Run("dir_release", func(t *testing.T) { + os, err := getOSInfo("testdata/dir_release") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, types.OSInfo{ + Type: "linux", + Family: "redhat", + Platform: "centos", + Name: "CentOS Linux", + Version: "7 (Core)", + Major: 7, + Minor: 4, + Patch: 1708, + Codename: "Core", + }, *os) + t.Logf("%#v", os) + }) +} diff --git a/agent/pkg/go-sysinfo/providers/linux/process_linux.go b/agent/pkg/go-sysinfo/providers/linux/process_linux.go new file mode 100644 index 0000000..e5a3ab0 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/process_linux.go @@ -0,0 +1,282 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "bytes" + "io/ioutil" + "os" + "strconv" + "strings" + "time" + + "github.com/prometheus/procfs" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +const userHz = 100 + +func (s linuxSystem) Processes() ([]types.Process, error) { + procs, err := s.procFS.AllProcs() + if err != nil { + return nil, err + } + + processes := make([]types.Process, 0, len(procs)) + for _, proc := range procs { + processes = append(processes, &process{Proc: proc, fs: s.procFS}) + } + return processes, nil +} + +func (s linuxSystem) Process(pid int) (types.Process, error) { + proc, err := s.procFS.NewProc(pid) + if err != nil { + return nil, err + } + + return &process{Proc: proc, fs: s.procFS}, nil +} + +func (s linuxSystem) Self() (types.Process, error) { + proc, err := s.procFS.Self() + if err != nil { + return nil, err + } + + return &process{Proc: proc, fs: s.procFS}, nil +} + +type process struct { + procfs.Proc + fs procFS + info *types.ProcessInfo +} + +func (p *process) PID() int { + return p.Proc.PID +} + +func (p *process) Parent() (types.Process, error) { + info, err := p.Info() + if err != nil { + return nil, err + } + + proc, err := p.fs.NewProc(info.PPID) + if err != nil { + return nil, err + } + + return &process{Proc: proc, fs: p.fs}, nil +} + +func (p *process) path(pa ...string) string { + return p.fs.path(append([]string{strconv.Itoa(p.PID())}, pa...)...) 
+} + +func (p *process) CWD() (string, error) { + // TODO: add CWD to procfs + cwd, err := os.Readlink(p.path("cwd")) + if os.IsNotExist(err) { + return "", nil + } + + return cwd, err +} + +func (p *process) Info() (types.ProcessInfo, error) { + if p.info != nil { + return *p.info, nil + } + + stat, err := p.NewStat() + if err != nil { + return types.ProcessInfo{}, err + } + + exe, err := p.Executable() + if err != nil { + return types.ProcessInfo{}, err + } + + args, err := p.CmdLine() + if err != nil { + return types.ProcessInfo{}, err + } + + cwd, err := p.CWD() + if err != nil { + return types.ProcessInfo{}, err + } + + bootTime, err := bootTime(p.fs.FS) + if err != nil { + return types.ProcessInfo{}, err + } + + p.info = &types.ProcessInfo{ + Name: stat.Comm, + PID: p.PID(), + PPID: stat.PPID, + CWD: cwd, + Exe: exe, + Args: args, + StartTime: bootTime.Add(ticksToDuration(stat.Starttime)), + } + + return *p.info, nil +} + +func (p *process) Memory() (types.MemoryInfo, error) { + stat, err := p.NewStat() + if err != nil { + return types.MemoryInfo{}, err + } + + return types.MemoryInfo{ + Resident: uint64(stat.ResidentMemory()), + Virtual: uint64(stat.VirtualMemory()), + }, nil +} + +func (p *process) CPUTime() (types.CPUTimes, error) { + stat, err := p.NewStat() + if err != nil { + return types.CPUTimes{}, err + } + + return types.CPUTimes{ + User: ticksToDuration(uint64(stat.UTime)), + System: ticksToDuration(uint64(stat.STime)), + }, nil +} + +// OpenHandles returns the list of open file descriptors of the process. +func (p *process) OpenHandles() ([]string, error) { + return p.Proc.FileDescriptorTargets() +} + +// OpenHandles returns the number of open file descriptors of the process. +func (p *process) OpenHandleCount() (int, error) { + return p.Proc.FileDescriptorsLen() +} + +func (p *process) Environment() (map[string]string, error) { + // TODO: add Environment to procfs + content, err := ioutil.ReadFile(p.path("environ")) + if err != nil { + return nil, err + } + + env := map[string]string{} + pairs := bytes.Split(content, []byte{0}) + for _, kv := range pairs { + parts := bytes.SplitN(kv, []byte{'='}, 2) + if len(parts) != 2 { + continue + } + + key := string(bytes.TrimSpace(parts[0])) + if key == "" { + continue + } + + env[key] = string(parts[1]) + } + + return env, nil +} + +func (p *process) Seccomp() (*types.SeccompInfo, error) { + content, err := ioutil.ReadFile(p.path("status")) + if err != nil { + return nil, err + } + + return readSeccompFields(content) +} + +func (p *process) Capabilities() (*types.CapabilityInfo, error) { + content, err := ioutil.ReadFile(p.path("status")) + if err != nil { + return nil, err + } + + return readCapabilities(content) +} + +func (p *process) User() (types.UserInfo, error) { + content, err := ioutil.ReadFile(p.path("status")) + if err != nil { + return types.UserInfo{}, err + } + + var user types.UserInfo + err = parseKeyValue(content, ':', func(key, value []byte) error { + // See proc(5) for the format of /proc/[pid]/status + switch string(key) { + case "Uid": + ids := strings.Split(string(value), "\t") + if len(ids) >= 3 { + user.UID = ids[0] + user.EUID = ids[1] + user.SUID = ids[2] + } + case "Gid": + ids := strings.Split(string(value), "\t") + if len(ids) >= 3 { + user.GID = ids[0] + user.EGID = ids[1] + user.SGID = ids[2] + } + } + return nil + }) + + return user, nil +} + +// NetworkStats reports network stats for an individual PID. 
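+// It reads /proc/<pid>/net/snmp and /proc/<pid>/net/netstat, so the counters
+// are scoped to the network namespace the process runs in, and are parsed by
+// the same helpers used for the host-level counters.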
+func (p *process) NetworkCounters() (*types.NetworkCountersInfo, error) { + snmpRaw, err := ioutil.ReadFile(p.path("net/snmp")) + if err != nil { + return nil, err + } + snmp, err := getNetSnmpStats(snmpRaw) + if err != nil { + return nil, err + } + + netstatRaw, err := ioutil.ReadFile(p.path("net/netstat")) + if err != nil { + return nil, err + } + netstat, err := getNetstatStats(netstatRaw) + if err != nil { + return nil, err + } + + return &types.NetworkCountersInfo{SNMP: snmp, Netstat: netstat}, nil +} + +func ticksToDuration(ticks uint64) time.Duration { + seconds := float64(ticks) / float64(userHz) * float64(time.Second) + return time.Duration(int64(seconds)) +} diff --git a/agent/pkg/go-sysinfo/providers/linux/process_linux_test.go b/agent/pkg/go-sysinfo/providers/linux/process_linux_test.go new file mode 100644 index 0000000..117dbd5 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/process_linux_test.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/internal/registry" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +var ( + _ registry.HostProvider = linuxSystem{} + _ registry.ProcessProvider = linuxSystem{} +) + +func TestProcessNetstat(t *testing.T) { + proc, err := newLinuxSystem("").Self() + if err != nil { + t.Fatal(err) + } + procNetwork, ok := proc.(types.NetworkCounters) + if !ok { + t.Fatalf("error, cannot cast to types.NetworkCounters") + } + stats, err := procNetwork.NetworkCounters() + if err != nil { + t.Fatal(err) + } + + assert.NotEmpty(t, stats.SNMP.ICMP, "ICMP") + assert.NotEmpty(t, stats.SNMP.IP, "IP") + assert.NotEmpty(t, stats.SNMP.TCP, "TCP") + assert.NotEmpty(t, stats.SNMP.UDP, "UDP") +} diff --git a/agent/pkg/go-sysinfo/providers/linux/procnet.go b/agent/pkg/go-sysinfo/providers/linux/procnet.go new file mode 100644 index 0000000..04279d2 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/procnet.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +// fillStruct is some reflection work that can dynamically fill one of our tagged `netstat` structs with netstat data +func fillStruct(str interface{}, data map[string]map[string]uint64) { + val := reflect.ValueOf(str).Elem() + typ := reflect.TypeOf(str).Elem() + + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + if tag := field.Tag.Get("netstat"); tag != "" { + if values, ok := data[tag]; ok { + val.Field(i).Set(reflect.ValueOf(values)) + } + } + } +} + +// parseEntry parses two lines from the net files, the first line being keys, the second being values +func parseEntry(line1, line2 string) (map[string]uint64, error) { + keyArr := strings.Split(strings.TrimSpace(line1), " ") + valueArr := strings.Split(strings.TrimSpace(line2), " ") + + if len(keyArr) != len(valueArr) { + return nil, errors.New("key and value lines are mismatched") + } + + counters := make(map[string]uint64, len(valueArr)) + for iter, value := range valueArr { + + // This if-else block is to deal with the MaxConn value in SNMP, + // which is a signed value according to RFC2012. + // This library emulates the behavior of the kernel: store all values as a uint, then cast to a signed value for printing + // Users of this library need to be aware that this value should be printed as a signed int or hex value to make it useful. + var parsed uint64 + var err error + if strings.Contains(value, "-") { + signedParsed, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing string to int in line: %#v: %w", valueArr, err) + } + parsed = uint64(signedParsed) + } else { + parsed, err = strconv.ParseUint(value, 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing string to int in line: %#v: %w", valueArr, err) + } + } + + counters[keyArr[iter]] = parsed + } + return counters, nil +} + +// parseNetFile parses an entire file, and returns a 2D map, representing how files are sorted by protocol +func parseNetFile(body string) (map[string]map[string]uint64, error) { + fileMetrics := make(map[string]map[string]uint64) + bodySplit := strings.Split(strings.TrimSpace(body), "\n") + // There should be an even number of lines. If not, something is wrong. 
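+	// Each protocol contributes a pair of lines, for example (as in the
+	// procnet_test.go fixture):
+	//	Udp: InDatagrams NoPorts InErrors OutDatagrams ...
+	//	Udp: 16755 33 0 16829 ...
+	// The first line names the counters and the second carries their values.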
+ if len(bodySplit)%2 != 0 { + return nil, fmt.Errorf("badly parsed body: %s", body) + } + // in the network counters, data is divided into two-line sections: a line of keys, and a line of values + // With each line + for index := 0; index < len(bodySplit); index += 2 { + keysSplit := strings.Split(bodySplit[index], ":") + valuesSplit := strings.Split(bodySplit[index+1], ":") + if len(keysSplit) != 2 || len(valuesSplit) != 2 { + return nil, fmt.Errorf("wrong number of keys: %#v", keysSplit) + } + valMap, err := parseEntry(keysSplit[1], valuesSplit[1]) + if err != nil { + return nil, fmt.Errorf("error parsing lines: %w", err) + } + fileMetrics[valuesSplit[0]] = valMap + } + return fileMetrics, nil +} + +// getNetSnmpStats pulls snmp stats from /proc/net +func getNetSnmpStats(raw []byte) (types.SNMP, error) { + snmpData, err := parseNetFile(string(raw)) + if err != nil { + return types.SNMP{}, fmt.Errorf("error parsing SNMP: %w", err) + } + output := types.SNMP{} + fillStruct(&output, snmpData) + + return output, nil +} + +// getNetstatStats pulls netstat stats from /proc/net +func getNetstatStats(raw []byte) (types.Netstat, error) { + netstatData, err := parseNetFile(string(raw)) + if err != nil { + return types.Netstat{}, fmt.Errorf("error parsing netstat: %w", err) + } + output := types.Netstat{} + fillStruct(&output, netstatData) + return output, nil +} diff --git a/agent/pkg/go-sysinfo/providers/linux/procnet_test.go b/agent/pkg/go-sysinfo/providers/linux/procnet_test.go new file mode 100644 index 0000000..25b210b --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/procnet_test.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package linux + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +func TestParseFile(t *testing.T) { + file := `Ip: Forwarding DefaultTTL InReceives InHdrErrors InAddrErrors ForwDatagrams InUnknownProtos InDiscards InDelivers OutRequests OutDiscards OutNoRoutes ReasmTimeout ReasmReqds ReasmOKs ReasmFails FragOKs FragFails FragCreates +Ip: 1 64 23123056 0 19 21075 0 0 21842645 16017256 1 4 0 0 0 0 0 0 0 +Icmp: InMsgs InErrors InCsumErrors InDestUnreachs InTimeExcds InParmProbs InSrcQuenchs InRedirects InEchos InEchoReps InTimestamps InTimestampReps InAddrMasks InAddrMaskReps OutMsgs OutErrors OutDestUnreachs OutTimeExcds OutParmProbs OutSrcQuenchs OutRedirects OutEchos OutEchoReps OutTimestamps OutTimestampReps OutAddrMasks OutAddrMaskReps +Icmp: 487 1 0 486 0 0 0 0 1 0 0 0 0 0 570 0 569 0 0 0 0 0 1 0 0 0 0 +IcmpMsg: InType3 InType8 OutType0 OutType3 +IcmpMsg: 486 1 1 569 +Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens PassiveOpens AttemptFails EstabResets CurrEstab InSegs OutSegs RetransSegs InErrs OutRsts InCsumErrors +Tcp: 1 200 120000 -1 4160 873 1408 334 10 21825426 34620510 10474 0 6102 0 +Udp: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors IgnoredMulti +Udp: 16755 33 0 16829 0 0 0 0 +UdpLite: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors IgnoredMulti +UdpLite: 0 0 0 0 0 0 0 0` + mapStr, err := parseNetFile(file) + if err != nil { + t.Fatalf("error: %s", err) + } + testOut := types.SNMP{} + fillStruct(&testOut, mapStr) + + assert.NotEmpty(t, testOut.IP) + assert.Equal(t, uint64(16755), testOut.UDP["InDatagrams"]) + assert.Equal(t, uint64(0xffffffffffffffff), testOut.TCP["MaxConn"]) +} diff --git a/agent/pkg/go-sysinfo/providers/linux/seccomp_linux.go b/agent/pkg/go-sysinfo/providers/linux/seccomp_linux.go new file mode 100644 index 0000000..4307b39 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/seccomp_linux.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package linux + +import ( + "strconv" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +type SeccompMode uint8 + +const ( + SeccompModeDisabled SeccompMode = iota + SeccompModeStrict + SeccompModeFilter +) + +func (m SeccompMode) String() string { + switch m { + case SeccompModeDisabled: + return "disabled" + case SeccompModeStrict: + return "strict" + case SeccompModeFilter: + return "filter" + default: + return strconv.Itoa(int(m)) + } +} + +func readSeccompFields(content []byte) (*types.SeccompInfo, error) { + var seccomp types.SeccompInfo + + err := parseKeyValue(content, ':', func(key, value []byte) error { + switch string(key) { + case "Seccomp": + mode, err := strconv.ParseUint(string(value), 10, 8) + if err != nil { + return err + } + seccomp.Mode = SeccompMode(mode).String() + case "NoNewPrivs": + noNewPrivs, err := strconv.ParseBool(string(value)) + if err != nil { + return err + } + seccomp.NoNewPrivs = &noNewPrivs + } + return nil + }) + + return &seccomp, err +} diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/etc/almalinux-release b/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/etc/almalinux-release new file mode 100644 index 0000000..f64637e --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/etc/almalinux-release @@ -0,0 +1 @@ +AlmaLinux release 9.1 (Lime Lynx) diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/etc/redhat-release b/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/etc/redhat-release new file mode 100644 index 0000000..1fe6ce0 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/etc/redhat-release @@ -0,0 +1 @@ +almalinux-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/etc/system-release b/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/etc/system-release new file mode 100644 index 0000000..1fe6ce0 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/etc/system-release @@ -0,0 +1 @@ +almalinux-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/usr/lib/os-release new file mode 100644 index 0000000..e3210a0 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/almalinux9/usr/lib/os-release @@ -0,0 +1,18 @@ +NAME="AlmaLinux" +VERSION="9.1 (Lime Lynx)" +ID="almalinux" +ID_LIKE="rhel centos fedora" +VERSION_ID="9.1" +PLATFORM_ID="platform:el9" +PRETTY_NAME="AlmaLinux 9.1 (Lime Lynx)" +ANSI_COLOR="0;34" +LOGO="fedora-logo-icon" +CPE_NAME="cpe:/o:almalinux:almalinux:9::baseos" +HOME_URL="https://almalinux.org/" +DOCUMENTATION_URL="https://wiki.almalinux.org/" +BUG_REPORT_URL="https://bugs.almalinux.org/" + +ALMALINUX_MANTISBT_PROJECT="AlmaLinux-9" +ALMALINUX_MANTISBT_PROJECT_VERSION="9.1" +REDHAT_SUPPORT_PRODUCT="AlmaLinux" +REDHAT_SUPPORT_PRODUCT_VERSION="9.1" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/alpine3.17/etc/alpine-release b/agent/pkg/go-sysinfo/providers/linux/testdata/alpine3.17/etc/alpine-release new file mode 
100644 index 0000000..56cc1b6 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/alpine3.17/etc/alpine-release @@ -0,0 +1 @@ +3.17.3 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/alpine3.17/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/alpine3.17/etc/os-release new file mode 100644 index 0000000..8478d8e --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/alpine3.17/etc/os-release @@ -0,0 +1,6 @@ +NAME="Alpine Linux" +ID=alpine +VERSION_ID=3.17.3 +PRETTY_NAME="Alpine Linux v3.17" +HOME_URL="https://alpinelinux.org/" +BUG_REPORT_URL="https://gitlab.alpinelinux.org/alpine/aports/-/issues" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/amazon2017.03/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/amazon2017.03/etc/os-release new file mode 100644 index 0000000..330f89f --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/amazon2017.03/etc/os-release @@ -0,0 +1,9 @@ +NAME="Amazon Linux AMI" +VERSION="2017.03" +ID="amzn" +ID_LIKE="rhel fedora" +VERSION_ID="2017.03" +PRETTY_NAME="Amazon Linux AMI 2017.03" +ANSI_COLOR="0;33" +CPE_NAME="cpe:/o:amazon:linux:2017.03:ga" +HOME_URL="http://aws.amazon.com/amazon-linux-ami/" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/amazon2017.03/etc/system-release b/agent/pkg/go-sysinfo/providers/linux/testdata/amazon2017.03/etc/system-release new file mode 100644 index 0000000..d3d7307 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/amazon2017.03/etc/system-release @@ -0,0 +1 @@ +Amazon Linux AMI release 2017.03 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/archlinux/etc/arch-release b/agent/pkg/go-sysinfo/providers/linux/testdata/archlinux/etc/arch-release new file mode 100644 index 0000000..e69de29 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/archlinux/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/archlinux/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/archlinux/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/archlinux/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/archlinux/usr/lib/os-release new file mode 100644 index 0000000..6218b0e --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/archlinux/usr/lib/os-release @@ -0,0 +1,11 @@ +NAME="Arch Linux ARM" +PRETTY_NAME="Arch Linux ARM" +ID=archarm +ID_LIKE=arch +BUILD_ID=rolling +ANSI_COLOR="38;2;23;147;209" +HOME_URL="https://archlinuxarm.org/" +DOCUMENTATION_URL="https://archlinuxarm.org/wiki" +SUPPORT_URL="https://archlinuxarm.org/forum" +BUG_REPORT_URL="https://github.com/archlinuxarm/PKGBUILDs/issues" +LOGO=archlinux-logo diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/centos6/etc/centos-release b/agent/pkg/go-sysinfo/providers/linux/testdata/centos6/etc/centos-release new file mode 100644 index 0000000..4ee9d87 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/centos6/etc/centos-release @@ -0,0 +1 @@ +CentOS release 6.9 (Final) diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/centos6/etc/redhat-release b/agent/pkg/go-sysinfo/providers/linux/testdata/centos6/etc/redhat-release new file mode 100644 index 0000000..05b34c7 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/centos6/etc/redhat-release @@ -0,0 +1 @@ +centos-release \ No newline at end of file diff --git 
a/agent/pkg/go-sysinfo/providers/linux/testdata/centos6/etc/system-release b/agent/pkg/go-sysinfo/providers/linux/testdata/centos6/etc/system-release new file mode 100644 index 0000000..05b34c7 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/centos6/etc/system-release @@ -0,0 +1 @@ +centos-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/etc/centos-release b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/etc/centos-release new file mode 100644 index 0000000..5bb15e0 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/etc/centos-release @@ -0,0 +1 @@ +CentOS Linux release 7.8.2003 (Core) diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/etc/redhat-release b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/etc/redhat-release new file mode 100644 index 0000000..05b34c7 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/etc/redhat-release @@ -0,0 +1 @@ +centos-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/etc/system-release b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/etc/system-release new file mode 100644 index 0000000..05b34c7 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/etc/system-release @@ -0,0 +1 @@ +centos-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/usr/lib/os-release new file mode 100644 index 0000000..7037a94 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7.8/usr/lib/os-release @@ -0,0 +1,16 @@ +NAME="CentOS Linux" +VERSION="7 (Core)" +ID="centos" +ID_LIKE="rhel fedora" +VERSION_ID="7" +PRETTY_NAME="CentOS Linux 7 (Core)" +ANSI_COLOR="0;31" +CPE_NAME="cpe:/o:centos:centos:7" +HOME_URL="https://www.centos.org/" +BUG_REPORT_URL="https://bugs.centos.org/" + +CENTOS_MANTISBT_PROJECT="CentOS-7" +CENTOS_MANTISBT_PROJECT_VERSION="7" +REDHAT_SUPPORT_PRODUCT="centos" +REDHAT_SUPPORT_PRODUCT_VERSION="7" + diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/centos7/etc/centos-release b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7/etc/centos-release new file mode 100644 index 0000000..70f514b --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7/etc/centos-release @@ -0,0 +1 @@ +CentOS Linux release 7.4.1708 (Core) diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/centos7/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7/etc/os-release new file mode 100644 index 0000000..7037a94 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7/etc/os-release @@ -0,0 +1,16 @@ +NAME="CentOS Linux" +VERSION="7 (Core)" +ID="centos" +ID_LIKE="rhel fedora" +VERSION_ID="7" +PRETTY_NAME="CentOS Linux 7 (Core)" +ANSI_COLOR="0;31" +CPE_NAME="cpe:/o:centos:centos:7" +HOME_URL="https://www.centos.org/" +BUG_REPORT_URL="https://bugs.centos.org/" + +CENTOS_MANTISBT_PROJECT="CentOS-7" +CENTOS_MANTISBT_PROJECT_VERSION="7" +REDHAT_SUPPORT_PRODUCT="centos" 
+REDHAT_SUPPORT_PRODUCT_VERSION="7" + diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/centos7/etc/redhat-release b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7/etc/redhat-release new file mode 100644 index 0000000..05b34c7 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7/etc/redhat-release @@ -0,0 +1 @@ +centos-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/centos7/etc/system-release b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7/etc/system-release new file mode 100644 index 0000000..05b34c7 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/centos7/etc/system-release @@ -0,0 +1 @@ +centos-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/debian6/etc/debian_version b/agent/pkg/go-sysinfo/providers/linux/testdata/debian6/etc/debian_version new file mode 100644 index 0000000..c7d48f0 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/debian6/etc/debian_version @@ -0,0 +1 @@ +6.0.10 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/debian9/etc/debian_version b/agent/pkg/go-sysinfo/providers/linux/testdata/debian9/etc/debian_version new file mode 100644 index 0000000..c3cae12 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/debian9/etc/debian_version @@ -0,0 +1 @@ +9.3 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/debian9/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/debian9/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/debian9/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/debian9/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/debian9/usr/lib/os-release new file mode 100644 index 0000000..ea56ca0 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/debian9/usr/lib/os-release @@ -0,0 +1,8 @@ +PRETTY_NAME="Debian GNU/Linux 9 (stretch)" +NAME="Debian GNU/Linux" +VERSION_ID="9" +VERSION="9 (stretch)" +ID=debian +HOME_URL="https://www.debian.org/" +SUPPORT_URL="https://www.debian.org/support" +BUG_REPORT_URL="https://bugs.debian.org/" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/aaa-release/.dir b/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/aaa-release/.dir new file mode 100644 index 0000000..e69de29 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/centos-release b/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/centos-release new file mode 100644 index 0000000..70f514b --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/centos-release @@ -0,0 +1 @@ +CentOS Linux release 7.4.1708 (Core) diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/os-release new file mode 100644 index 0000000..7037a94 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/os-release @@ -0,0 +1,16 @@ +NAME="CentOS Linux" +VERSION="7 (Core)" +ID="centos" +ID_LIKE="rhel fedora" +VERSION_ID="7" +PRETTY_NAME="CentOS Linux 7 (Core)" +ANSI_COLOR="0;31" +CPE_NAME="cpe:/o:centos:centos:7" +HOME_URL="https://www.centos.org/" +BUG_REPORT_URL="https://bugs.centos.org/" + +CENTOS_MANTISBT_PROJECT="CentOS-7" +CENTOS_MANTISBT_PROJECT_VERSION="7" 
+REDHAT_SUPPORT_PRODUCT="centos" +REDHAT_SUPPORT_PRODUCT_VERSION="7" + diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/redhat-release b/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/redhat-release new file mode 100644 index 0000000..05b34c7 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/redhat-release @@ -0,0 +1 @@ +centos-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/system-release b/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/system-release new file mode 100644 index 0000000..05b34c7 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/dir_release/etc/system-release @@ -0,0 +1 @@ +centos-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/etc/fedora-release b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/etc/fedora-release new file mode 100644 index 0000000..245bb42 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/etc/fedora-release @@ -0,0 +1 @@ +../usr/lib/fedora-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/etc/redhat-release b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/etc/redhat-release new file mode 100644 index 0000000..fc49fa6 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/etc/redhat-release @@ -0,0 +1 @@ +fedora-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/etc/system-release b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/etc/system-release new file mode 100644 index 0000000..fc49fa6 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/etc/system-release @@ -0,0 +1 @@ +fedora-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/proc/net/netstat b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/proc/net/netstat new file mode 100644 index 0000000..7b390ba --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/proc/net/netstat @@ -0,0 +1,4 @@ +TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed EmbryonicRsts PruneCalled RcvPruned OfoPruned OutOfWindowIcmps LockDroppedIcmps ArpFilter TW TWRecycled TWKilled PAWSActive PAWSEstab DelayedACKs DelayedACKLocked DelayedACKLost ListenOverflows ListenDrops TCPHPHits TCPPureAcks TCPHPAcks TCPRenoRecovery TCPSackRecovery TCPSACKReneging TCPSACKReorder TCPRenoReorder TCPTSReorder TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo TCPLostRetransmit TCPRenoFailures TCPSackFailures TCPLossFailures TCPFastRetrans TCPSlowStartRetrans TCPTimeouts TCPLossProbes TCPLossProbeRecovery TCPRenoRecoveryFail TCPSackRecoveryFail TCPRcvCollapsed TCPBacklogCoalesce TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv TCPDSACKOfoRecv TCPAbortOnData TCPAbortOnClose TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger TCPAbortFailed TCPMemoryPressures TCPMemoryPressuresChrono TCPSACKDiscard TCPDSACKIgnoredOld TCPDSACKIgnoredNoUndo TCPSpuriousRTOs TCPMD5NotFound TCPMD5Unexpected TCPMD5Failure 
TCPSackShifted TCPSackMerged TCPSackShiftFallback TCPBacklogDrop PFMemallocDrop TCPMinTTLDrop TCPDeferAcceptDrop IPReversePathFilter TCPTimeWaitOverflow TCPReqQFullDoCookies TCPReqQFullDrop TCPRetransFail TCPRcvCoalesce TCPOFOQueue TCPOFODrop TCPOFOMerge TCPChallengeACK TCPSYNChallenge TCPFastOpenActive TCPFastOpenActiveFail TCPFastOpenPassive TCPFastOpenPassiveFail TCPFastOpenListenOverflow TCPFastOpenCookieReqd TCPFastOpenBlackhole TCPSpuriousRtxHostQueues BusyPollRxPackets TCPAutoCorking TCPFromZeroWindowAdv TCPToZeroWindowAdv TCPWantZeroWindowAdv TCPSynRetrans TCPOrigDataSent TCPHystartTrainDetect TCPHystartTrainCwnd TCPHystartDelayDetect TCPHystartDelayCwnd TCPACKSkippedSynRecv TCPACKSkippedPAWS TCPACKSkippedSeq TCPACKSkippedFinWait2 TCPACKSkippedTimeWait TCPACKSkippedChallenge TCPWinProbe TCPKeepAlive TCPMTUPFail TCPMTUPSuccess TCPDelivered TCPDeliveredCE TCPAckCompressed TCPZeroWindowDrop TCPRcvQDrop +TcpExt: 0 0 2 2 0 0 0 8 0 0 934 0 0 0 15 287657 86 1174 0 0 13549524 3703992 2542609 0 118 0 467 0 168 3 39 35 359 1387 0 5 1 3781 2 1955 7458 366 0 2 0 278312 1238 11 4216 2 1494 272 0 77 0 0 0 0 82 186 1869 3 0 0 0 25682 9680 660 0 0 0 0 0 0 0 0 0 3548522 127400 0 10 0 0 0 0 0 0 0 0 0 0 0 104 883 883 71010 257 29255516 148 3243 18 2382 3 1 3 0 0 0 0 204720 0 0 29260768 0 109908 0 0 +IpExt: InNoRoutes InTruncatedPkts InMcastPkts OutMcastPkts InBcastPkts OutBcastPkts InOctets OutOctets InMcastOctets OutMcastOctets InBcastOctets OutBcastOctets InCsumErrors InNoECTPkts InECT1Pkts InECT0Pkts InCEPkts ReasmOverlaps +IpExt: 0 0 0 0 601711 0 102243038277 39839396260 0 0 143425103 0 0 76854883 0 3160941 0 0 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/proc/net/snmp b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/proc/net/snmp new file mode 100644 index 0000000..588937f --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/proc/net/snmp @@ -0,0 +1,12 @@ +Ip: Forwarding DefaultTTL InReceives InHdrErrors InAddrErrors ForwDatagrams InUnknownProtos InDiscards InDelivers OutRequests OutDiscards OutNoRoutes ReasmTimeout ReasmReqds ReasmOKs ReasmFails FragOKs FragFails FragCreates +Ip: 1 64 23685774 0 19 22122 0 0 22328773 16478755 1 4 0 0 0 0 0 0 0 +Icmp: InMsgs InErrors InCsumErrors InDestUnreachs InTimeExcds InParmProbs InSrcQuenchs InRedirects InEchos InEchoReps InTimestamps InTimestampReps InAddrMasks InAddrMaskReps OutMsgs OutErrors OutDestUnreachs OutTimeExcds OutParmProbs OutSrcQuenchs OutRedirects OutEchos OutEchoReps OutTimestamps OutTimestampReps OutAddrMasks OutAddrMaskReps +Icmp: 487 1 0 486 0 0 0 0 1 0 0 0 0 0 571 0 570 0 0 0 0 0 1 0 0 0 0 +IcmpMsg: InType3 InType8 OutType0 OutType3 +IcmpMsg: 486 1 1 570 +Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens PassiveOpens AttemptFails EstabResets CurrEstab InSegs OutSegs RetransSegs InErrs OutRsts InCsumErrors +Tcp: 1 200 120000 -1 4224 885 1408 343 8 22311083 35810350 10752 0 6286 0 +Udp: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors IgnoredMulti +Udp: 17225 34 0 17301 0 0 0 0 +UdpLite: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors IgnoredMulti +UdpLite: 0 0 0 0 0 0 0 0 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/proc/stat b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/proc/stat new file mode 100644 index 0000000..afa00df --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/proc/stat @@ -0,0 +1,12 @@ +cpu 31608779 223223 15924288 1134746753 223880 4348430 1995355 
266903 0 0 +cpu0 7929502 52284 3994607 283776341 58002 1041339 448737 62980 0 0 +cpu1 7871778 55337 3986872 283642724 50819 1099882 580838 68839 0 0 +cpu2 7951421 68162 4002718 283618789 59074 1071996 526388 65991 0 0 +cpu3 7856076 47438 3940090 283708897 55983 1135211 439391 69093 0 0 +intr 3649757765 110 9 0 0 0 0 0 0 1 0 1493876 32 18 0 0 2918021 0 0 0 0 0 0 0 0 0 0 0 8660478 0 19957110 246 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 5607775026 +btime 1571459083 +processes 4891735 +procs_running 1 +procs_blocked 0 +softirq 1934569580 0 876016023 1299 20736488 9710454 0 130 357152191 25831 670927164 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/usr/lib/fedora-release b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/usr/lib/fedora-release new file mode 100644 index 0000000..88caaa8 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/usr/lib/fedora-release @@ -0,0 +1 @@ +Fedora release 30 (Thirty) diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/usr/lib/os-release new file mode 100644 index 0000000..8fc7f4a --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/usr/lib/os-release @@ -0,0 +1,21 @@ +NAME=Fedora +VERSION="30 (Container Image)" +ID=fedora +VERSION_ID=30 +VERSION_CODENAME="" +PLATFORM_ID="platform:f30" +PRETTY_NAME="Fedora 30 (Container Image)" +ANSI_COLOR="0;34" +LOGO=fedora-logo-icon +CPE_NAME="cpe:/o:fedoraproject:fedora:30" +HOME_URL="https://fedoraproject.org/" +DOCUMENTATION_URL="https://docs.fedoraproject.org/en-US/fedora/f30/system-administrators-guide/" +SUPPORT_URL="https://fedoraproject.org/wiki/Communicating_and_getting_help" +BUG_REPORT_URL="https://bugzilla.redhat.com/" +REDHAT_BUGZILLA_PRODUCT="Fedora" +REDHAT_BUGZILLA_PRODUCT_VERSION=30 +REDHAT_SUPPORT_PRODUCT="Fedora" +REDHAT_SUPPORT_PRODUCT_VERSION=30 +PRIVACY_POLICY_URL="https://fedoraproject.org/wiki/Legal:PrivacyPolicy" +VARIANT="Container Image" +VARIANT_ID=container diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/usr/lib/redhat-release b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/usr/lib/redhat-release new file mode 100644 index 0000000..88caaa8 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/usr/lib/redhat-release @@ -0,0 +1 @@ +Fedora release 30 (Thirty) diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/usr/lib/system-release b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/usr/lib/system-release new file mode 100644 index 0000000..88caaa8 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/fedora30/usr/lib/system-release @@ -0,0 +1 @@ +Fedora 
release 30 (Thirty) diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/linuxmint20/etc/lsb-release b/agent/pkg/go-sysinfo/providers/linux/testdata/linuxmint20/etc/lsb-release new file mode 100644 index 0000000..c27d35f --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/linuxmint20/etc/lsb-release @@ -0,0 +1,4 @@ +DISTRIB_ID=LinuxMint +DISTRIB_RELEASE=20 +DISTRIB_CODENAME=ulyana +DISTRIB_DESCRIPTION="Linux Mint 20 Ulyana" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/linuxmint20/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/linuxmint20/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/linuxmint20/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/linuxmint20/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/linuxmint20/usr/lib/os-release new file mode 100644 index 0000000..825d928 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/linuxmint20/usr/lib/os-release @@ -0,0 +1,12 @@ +NAME="Linux Mint" +VERSION="20 (Ulyana)" +ID=linuxmint +ID_LIKE=ubuntu +PRETTY_NAME="Linux Mint 20" +VERSION_ID="20" +HOME_URL="https://www.linuxmint.com/" +SUPPORT_URL="https://forums.linuxmint.com/" +BUG_REPORT_URL="http://linuxmint-troubleshooting-guide.readthedocs.io/en/latest/" +PRIVACY_POLICY_URL="https://www.linuxmint.com/" +VERSION_CODENAME=ulyana +UBUNTU_CODENAME=focal diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/manjaro23/etc/arch-release b/agent/pkg/go-sysinfo/providers/linux/testdata/manjaro23/etc/arch-release new file mode 100644 index 0000000..e69de29 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/manjaro23/etc/lsb-release b/agent/pkg/go-sysinfo/providers/linux/testdata/manjaro23/etc/lsb-release new file mode 100644 index 0000000..d4222a2 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/manjaro23/etc/lsb-release @@ -0,0 +1,4 @@ +DISTRIB_ID=Manjaro-ARM +DISTRIB_RELEASE=23.02 +DISTRIB_CODENAME= +DISTRIB_DESCRIPTION="Manjaro ARM Linux" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/manjaro23/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/manjaro23/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/manjaro23/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/manjaro23/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/manjaro23/usr/lib/os-release new file mode 100644 index 0000000..a4e7fb6 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/manjaro23/usr/lib/os-release @@ -0,0 +1,8 @@ +NAME="Manjaro ARM" +ID="manjaro-arm" +ID_LIKE="manjaro arch" +PRETTY_NAME="Manjaro ARM" +ANSI_COLOR="1;32" +HOME_URL="https://www.manjaro.org/" +SUPPORT_URL="https://forum.manjaro.org/c/arm/" +LOGO=manjarolinux diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/openeuler20.03/etc/openEuler-release b/agent/pkg/go-sysinfo/providers/linux/testdata/openeuler20.03/etc/openEuler-release new file mode 100644 index 0000000..70100dc --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/openeuler20.03/etc/openEuler-release @@ -0,0 +1 @@ +openEuler release 20.03 (LTS-SP3) diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/openeuler20.03/etc/os-release 
b/agent/pkg/go-sysinfo/providers/linux/testdata/openeuler20.03/etc/os-release new file mode 100644 index 0000000..64e8bd1 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/openeuler20.03/etc/os-release @@ -0,0 +1,7 @@ +NAME="openEuler" +VERSION="20.03 (LTS-SP3)" +ID="openEuler" +VERSION_ID="20.03" +PRETTY_NAME="openEuler 20.03 (LTS-SP3)" +ANSI_COLOR="0;31" + diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/openeuler20.03/etc/system-release b/agent/pkg/go-sysinfo/providers/linux/testdata/openeuler20.03/etc/system-release new file mode 100644 index 0000000..e4a26ac --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/openeuler20.03/etc/system-release @@ -0,0 +1 @@ +openEuler-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/opensuse-leap15.4/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/opensuse-leap15.4/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/opensuse-leap15.4/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/opensuse-leap15.4/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/opensuse-leap15.4/usr/lib/os-release new file mode 100644 index 0000000..fa61b75 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/opensuse-leap15.4/usr/lib/os-release @@ -0,0 +1,12 @@ +NAME="openSUSE Leap" +VERSION="15.4" +ID="opensuse-leap" +ID_LIKE="suse opensuse" +VERSION_ID="15.4" +PRETTY_NAME="openSUSE Leap 15.4" +ANSI_COLOR="0;32" +CPE_NAME="cpe:/o:opensuse:leap:15.4" +BUG_REPORT_URL="https://bugs.opensuse.org" +HOME_URL="https://www.opensuse.org/" +DOCUMENTATION_URL="https://en.opensuse.org/Portal:Leap" +LOGO="distributor-logo-Leap" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/opensuse-tumbleweed/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/opensuse-tumbleweed/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/opensuse-tumbleweed/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/opensuse-tumbleweed/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/opensuse-tumbleweed/usr/lib/os-release new file mode 100644 index 0000000..0349bb9 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/opensuse-tumbleweed/usr/lib/os-release @@ -0,0 +1,12 @@ +NAME="openSUSE Tumbleweed" +# VERSION="20230108" +ID="opensuse-tumbleweed" +ID_LIKE="opensuse suse" +VERSION_ID="20230108" +PRETTY_NAME="openSUSE Tumbleweed" +ANSI_COLOR="0;32" +CPE_NAME="cpe:/o:opensuse:tumbleweed:20230108" +BUG_REPORT_URL="https://bugs.opensuse.org" +HOME_URL="https://www.opensuse.org/" +DOCUMENTATION_URL="https://en.opensuse.org/Portal:Tumbleweed" +LOGO="distributor-logo-Tumbleweed" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/oraclelinux7/etc/oracle-release b/agent/pkg/go-sysinfo/providers/linux/testdata/oraclelinux7/etc/oracle-release new file mode 100644 index 0000000..1c41687 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/oraclelinux7/etc/oracle-release @@ -0,0 +1 @@ +Oracle Linux Server release 7.9 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/oraclelinux7/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/oraclelinux7/etc/os-release new file mode 100644 index 
0000000..f8823a8 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/oraclelinux7/etc/os-release @@ -0,0 +1,17 @@ +NAME="Oracle Linux Server" +VERSION="7.9" +ID="ol" +ID_LIKE="fedora" +VARIANT="Server" +VARIANT_ID="server" +VERSION_ID="7.9" +PRETTY_NAME="Oracle Linux Server 7.9" +ANSI_COLOR="0;31" +CPE_NAME="cpe:/o:oracle:linux:7:9:server" +HOME_URL="https://linux.oracle.com/" +BUG_REPORT_URL="https://bugzilla.oracle.com/" + +ORACLE_BUGZILLA_PRODUCT="Oracle Linux 7" +ORACLE_BUGZILLA_PRODUCT_VERSION=7.9 +ORACLE_SUPPORT_PRODUCT="Oracle Linux" +ORACLE_SUPPORT_PRODUCT_VERSION=7.9 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/oraclelinux7/etc/redhat-release b/agent/pkg/go-sysinfo/providers/linux/testdata/oraclelinux7/etc/redhat-release new file mode 100644 index 0000000..db5baab --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/oraclelinux7/etc/redhat-release @@ -0,0 +1 @@ +Red Hat Enterprise Linux Server release 7.9 (Maipo) diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/oraclelinux7/etc/system-release b/agent/pkg/go-sysinfo/providers/linux/testdata/oraclelinux7/etc/system-release new file mode 100644 index 0000000..b5cb9bd --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/oraclelinux7/etc/system-release @@ -0,0 +1 @@ +oracle-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/raspbian9/etc/debian_version b/agent/pkg/go-sysinfo/providers/linux/testdata/raspbian9/etc/debian_version new file mode 100644 index 0000000..0359f24 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/raspbian9/etc/debian_version @@ -0,0 +1 @@ +9.4 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/raspbian9/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/raspbian9/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/raspbian9/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/raspbian9/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/raspbian9/usr/lib/os-release new file mode 100644 index 0000000..4ed9750 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/raspbian9/usr/lib/os-release @@ -0,0 +1,9 @@ +PRETTY_NAME="Raspbian GNU/Linux 9 (stretch)" +NAME="Raspbian GNU/Linux" +VERSION_ID="9" +VERSION="9 (stretch)" +ID=raspbian +ID_LIKE=debian +HOME_URL="http://www.raspbian.org/" +SUPPORT_URL="http://www.raspbian.org/RaspbianForums" +BUG_REPORT_URL="http://www.raspbian.org/RaspbianBugs" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/redhat7/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat7/etc/os-release new file mode 100644 index 0000000..7bdc744 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat7/etc/os-release @@ -0,0 +1,17 @@ +NAME="Red Hat Enterprise Linux Server" +VERSION="7.6 (Maipo)" +ID="rhel" +ID_LIKE="fedora" +VARIANT="Server" +VARIANT_ID="server" +VERSION_ID="7.6" +PRETTY_NAME="Red Hat Enterprise Linux Server 7.6 (Maipo)" +ANSI_COLOR="0;31" +CPE_NAME="cpe:/o:redhat:enterprise_linux:7.6:GA:server" +HOME_URL="https://www.redhat.com/" +BUG_REPORT_URL="https://bugzilla.redhat.com/" + +REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7" +REDHAT_BUGZILLA_PRODUCT_VERSION=7.6 +REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux" +REDHAT_SUPPORT_PRODUCT_VERSION="7.6" diff --git 
a/agent/pkg/go-sysinfo/providers/linux/testdata/redhat7/etc/redhat-release b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat7/etc/redhat-release new file mode 100644 index 0000000..1d50bfc --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat7/etc/redhat-release @@ -0,0 +1 @@ +Red Hat Enterprise Linux Server release 7.6 (Maipo) diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/redhat7/etc/system-release b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat7/etc/system-release new file mode 100644 index 0000000..428ee09 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat7/etc/system-release @@ -0,0 +1 @@ +redhat-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/redhat9/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat9/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat9/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/redhat9/etc/redhat-release b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat9/etc/redhat-release new file mode 100644 index 0000000..0ea0a54 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat9/etc/redhat-release @@ -0,0 +1 @@ +Red Hat Enterprise Linux release 9.0 (Plow) diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/redhat9/etc/system-release b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat9/etc/system-release new file mode 100644 index 0000000..428ee09 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat9/etc/system-release @@ -0,0 +1 @@ +redhat-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/redhat9/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat9/usr/lib/os-release new file mode 100644 index 0000000..9911d3d --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/redhat9/usr/lib/os-release @@ -0,0 +1,18 @@ +NAME="Red Hat Enterprise Linux" +VERSION="9.0 (Plow)" +ID="rhel" +ID_LIKE="fedora" +VERSION_ID="9.0" +PLATFORM_ID="platform:el9" +PRETTY_NAME="Red Hat Enterprise Linux 9.0 (Plow)" +ANSI_COLOR="0;31" +LOGO="fedora-logo-icon" +CPE_NAME="cpe:/o:redhat:enterprise_linux:9::baseos" +HOME_URL="https://www.redhat.com/" +DOCUMENTATION_URL="https://access.redhat.com/documentation/red_hat_enterprise_linux/9/" +BUG_REPORT_URL="https://bugzilla.redhat.com/" + +REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 9" +REDHAT_BUGZILLA_PRODUCT_VERSION=9.0 +REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux" +REDHAT_SUPPORT_PRODUCT_VERSION="9.0" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/etc/redhat-release b/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/etc/redhat-release new file mode 100644 index 0000000..780b78a --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/etc/redhat-release @@ -0,0 +1 @@ +rocky-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/etc/rocky-release 
b/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/etc/rocky-release new file mode 100644 index 0000000..a66b297 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/etc/rocky-release @@ -0,0 +1 @@ +Rocky Linux release 9.0 (Blue Onyx) diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/etc/system-release b/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/etc/system-release new file mode 100644 index 0000000..780b78a --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/etc/system-release @@ -0,0 +1 @@ +rocky-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/usr/lib/os-release new file mode 100644 index 0000000..9d1eb4a --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/rockylinux9/usr/lib/os-release @@ -0,0 +1,16 @@ +NAME="Rocky Linux" +VERSION="9.0 (Blue Onyx)" +ID="rocky" +ID_LIKE="rhel centos fedora" +VERSION_ID="9.0" +PLATFORM_ID="platform:el9" +PRETTY_NAME="Rocky Linux 9.0 (Blue Onyx)" +ANSI_COLOR="0;32" +LOGO="fedora-logo-icon" +CPE_NAME="cpe:/o:rocky:rocky:9::baseos" +HOME_URL="https://rockylinux.org/" +BUG_REPORT_URL="https://bugs.rockylinux.org/" +ROCKY_SUPPORT_PRODUCT="Rocky-Linux-9" +ROCKY_SUPPORT_PRODUCT_VERSION="9.0" +REDHAT_SUPPORT_PRODUCT="Rocky Linux" +REDHAT_SUPPORT_PRODUCT_VERSION="9.0" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1204/etc/lsb-release b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1204/etc/lsb-release new file mode 100644 index 0000000..83278be --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1204/etc/lsb-release @@ -0,0 +1,4 @@ +DISTRIB_ID=Ubuntu +DISTRIB_RELEASE=12.04 +DISTRIB_CODENAME=precise +DISTRIB_DESCRIPTION="Ubuntu 12.04.5 LTS" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1204/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1204/etc/os-release new file mode 100644 index 0000000..a9f7fcc --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1204/etc/os-release @@ -0,0 +1,6 @@ +NAME="Ubuntu" +VERSION="12.04.5 LTS, Precise Pangolin" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu precise (12.04.5 LTS)" +VERSION_ID="12.04" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1404/etc/debian_version b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1404/etc/debian_version new file mode 100644 index 0000000..9a5939c --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1404/etc/debian_version @@ -0,0 +1 @@ +jessie/sid diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1404/etc/lsb-release b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1404/etc/lsb-release new file mode 100644 index 0000000..be87e06 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1404/etc/lsb-release @@ -0,0 +1,4 @@ +DISTRIB_ID=Ubuntu +DISTRIB_RELEASE=14.04 +DISTRIB_CODENAME=trusty +DISTRIB_DESCRIPTION="Ubuntu 14.04.5 LTS" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1404/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1404/etc/os-release new file mode 100644 index 0000000..fa4c4a3 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1404/etc/os-release @@ -0,0 +1,9 @@ +NAME="Ubuntu" +VERSION="14.04.5 LTS, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04.5 LTS" +VERSION_ID="14.04" 
+HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/etc/debian_version b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/etc/debian_version new file mode 100644 index 0000000..b0b57ed --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/etc/debian_version @@ -0,0 +1 @@ +stretch/sid diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/etc/lsb-release b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/etc/lsb-release new file mode 100644 index 0000000..b27e1b3 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/etc/lsb-release @@ -0,0 +1,4 @@ +DISTRIB_ID=Ubuntu +DISTRIB_RELEASE=17.10 +DISTRIB_CODENAME=artful +DISTRIB_DESCRIPTION="Ubuntu 17.10" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/proc/loadavg b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/proc/loadavg new file mode 100644 index 0000000..81ad54e --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/proc/loadavg @@ -0,0 +1 @@ +1.55 1.74 1.73 2/1318 46377 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/proc/meminfo b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/proc/meminfo new file mode 100644 index 0000000..4340935 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/proc/meminfo @@ -0,0 +1,45 @@ +MemTotal: 4042048 kB +MemFree: 2551468 kB +MemAvailable: 3651940 kB +Buffers: 33316 kB +Cached: 1248984 kB +SwapCached: 0 kB +Active: 210556 kB +Inactive: 1181880 kB +Active(anon): 111080 kB +Inactive(anon): 660 kB +Active(file): 99476 kB +Inactive(file): 1181220 kB +Unevictable: 0 kB +Mlocked: 0 kB +SwapTotal: 1048572 kB +SwapFree: 1048572 kB +Dirty: 0 kB +Writeback: 0 kB +AnonPages: 110216 kB +Mapped: 91028 kB +Shmem: 1536 kB +Slab: 68708 kB +SReclaimable: 49852 kB +SUnreclaim: 18856 kB +KernelStack: 5632 kB +PageTables: 1652 kB +NFS_Unstable: 0 kB +Bounce: 0 kB +WritebackTmp: 0 kB +CommitLimit: 3069596 kB +Committed_AS: 418744 kB +VmallocTotal: 34359738367 kB +VmallocUsed: 0 kB +VmallocChunk: 0 kB +AnonHugePages: 0 kB +ShmemHugePages: 0 kB +ShmemPmdMapped: 0 kB +HugePages_Total: 0 +HugePages_Free: 0 +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +DirectMap4k: 32172 kB +DirectMap2M: 3112960 kB +DirectMap1G: 3145728 kB diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/proc/stat b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/proc/stat new file mode 100644 index 0000000..89d6c94 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/proc/stat @@ -0,0 +1,12 @@ +cpu 2277627 0 1464379 1303119903 200372 0 27401 0 0 0 +cpu0 593545 0 431398 325286468 50343 0 14435 0 0 0 +cpu1 552949 0 341664 325964281 47304 0 5468 0 0 0 +cpu2 568758 0 359758 325909025 49470 0 3791 0 0 0 +cpu3 562375 0 331559 325960129 53255 0 3707 0 0 0 +intr 421434840 27 0 0 0 1100 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 22085 3951831 3272 52 0 2035574 1 0 236 0 25512135 27798885 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 888692448 +btime 1518838073 +processes 754326 +procs_running 3 +procs_blocked 0 +softirq 162361882 0 61330255 3576 5818933 3941332 0 1 60746100 0 30521685 diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/proc/vmstat b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/proc/vmstat new file mode 100644 index 0000000..7ff1d85 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/proc/vmstat @@ -0,0 +1,133 @@ +nr_free_pages 50545 +nr_zone_inactive_anon 66 +nr_zone_active_anon 26799 +nr_zone_inactive_file 31849 +nr_zone_active_file 94164 +nr_zone_unevictable 0 +nr_zone_write_pending 7 +nr_mlock 0 +nr_page_table_pages 1225 +nr_kernel_stack 2496 +nr_bounce 0 +nr_zspages 0 +nr_free_cma 0 +numa_hit 44470329 +numa_miss 0 +numa_foreign 0 +numa_interleave 16296 +numa_local 44470329 +numa_other 0 +nr_inactive_anon 66 +nr_active_anon 26799 +nr_inactive_file 31849 +nr_active_file 94164 +nr_unevictable 0 +nr_slab_reclaimable 31763 +nr_slab_unreclaimable 10329 +nr_isolated_anon 0 +nr_isolated_file 0 +workingset_refault 302914 +workingset_activate 108959 +workingset_nodereclaim 6422 +nr_anon_pages 26218 +nr_mapped 8641 +nr_file_pages 126182 +nr_dirty 7 +nr_writeback 0 +nr_writeback_temp 0 +nr_shmem 169 +nr_shmem_hugepages 0 +nr_shmem_pmdmapped 0 +nr_anon_transparent_hugepages 0 +nr_unstable 0 +nr_vmscan_write 35 +nr_vmscan_immediate_reclaim 9832 +nr_dirtied 7188920 +nr_written 6479005 +nr_dirty_threshold 31736 +nr_dirty_background_threshold 15848 +pgpgin 17010697 +pgpgout 27734292 +pswpin 0 +pswpout 0 +pgalloc_dma 241378 +pgalloc_dma32 45788683 +pgalloc_normal 0 +pgalloc_movable 0 +allocstall_dma 0 +allocstall_dma32 0 +allocstall_normal 5 +allocstall_movable 8 +pgskip_dma 0 +pgskip_dma32 0 +pgskip_normal 0 +pgskip_movable 0 +pgfree 46085578 +pgactivate 2475069 +pgdeactivate 636658 +pglazyfree 9426 +pgfault 46777498 +pgmajfault 19204 +pglazyfreed 0 +pgrefill 707817 +pgsteal_kswapd 3798890 +pgsteal_direct 1466 +pgscan_kswapd 3868525 +pgscan_direct 1483 +pgscan_direct_throttle 0 +zone_reclaim_failed 0 +pginodesteal 1710 +slabs_scanned 8348560 +kswapd_inodesteal 3142001 +kswapd_low_wmark_hit_quickly 541 +kswapd_high_wmark_hit_quickly 332 +pageoutrun 1492 +pgrotated 29725 +drop_pagecache 0 +drop_slab 0 +oom_kill 0 +numa_pte_updates 0 +numa_huge_pte_updates 0 +numa_hint_faults 0 +numa_hint_faults_local 0 +numa_pages_migrated 0 +pgmigrate_success 4539 +pgmigrate_fail 156 +compact_migrate_scanned 9331 +compact_free_scanned 136266 +compact_isolated 9407 +compact_stall 2 +compact_fail 0 +compact_success 2 +compact_daemon_wake 21 +compact_daemon_migrate_scanned 8311 +compact_daemon_free_scanned 107086 +htlb_buddy_alloc_success 0 +htlb_buddy_alloc_fail 0 +unevictable_pgs_culled 19 
+unevictable_pgs_scanned 0 +unevictable_pgs_rescued 304 +unevictable_pgs_mlocked 304 +unevictable_pgs_munlocked 304 +unevictable_pgs_cleared 0 +unevictable_pgs_stranded 0 +thp_fault_alloc 2 +thp_fault_fallback 0 +thp_collapse_alloc 2 +thp_collapse_alloc_failed 0 +thp_file_alloc 0 +thp_file_mapped 0 +thp_split_page 0 +thp_split_page_failed 0 +thp_deferred_split_page 4 +thp_split_pmd 1 +thp_split_pud 0 +thp_zero_page_alloc 0 +thp_zero_page_alloc_failed 0 +thp_swpout 0 +thp_swpout_fallback 0 +balloon_inflate 0 +balloon_deflate 0 +balloon_migrate 0 +swap_ra 0 +swap_ra_hit 0 \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/usr/lib/os-release new file mode 100644 index 0000000..2f1d60f --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu1710/usr/lib/os-release @@ -0,0 +1,12 @@ +NAME="Ubuntu" +VERSION="17.10 (Artful Aardvark)" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 17.10" +VERSION_ID="17.10" +HOME_URL="https://www.ubuntu.com/" +SUPPORT_URL="https://help.ubuntu.com/" +BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" +PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" +VERSION_CODENAME=artful +UBUNTU_CODENAME=artful diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu2204/etc/lsb-release b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu2204/etc/lsb-release new file mode 100644 index 0000000..87045df --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu2204/etc/lsb-release @@ -0,0 +1,4 @@ +DISTRIB_ID=Ubuntu +DISTRIB_RELEASE=22.04 +DISTRIB_CODENAME=jammy +DISTRIB_DESCRIPTION="Ubuntu 22.04 LTS" diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu2204/etc/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu2204/etc/os-release new file mode 100644 index 0000000..c4c75b4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu2204/etc/os-release @@ -0,0 +1 @@ +../usr/lib/os-release \ No newline at end of file diff --git a/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu2204/usr/lib/os-release b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu2204/usr/lib/os-release new file mode 100644 index 0000000..9a95bef --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/testdata/ubuntu2204/usr/lib/os-release @@ -0,0 +1,12 @@ +PRETTY_NAME="Ubuntu 22.04 LTS" +NAME="Ubuntu" +VERSION_ID="22.04" +VERSION="22.04 LTS (Jammy Jellyfish)" +VERSION_CODENAME=jammy +ID=ubuntu +ID_LIKE=debian +HOME_URL="https://www.ubuntu.com/" +SUPPORT_URL="https://help.ubuntu.com/" +BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" +PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" +UBUNTU_CODENAME=jammy diff --git a/agent/pkg/go-sysinfo/providers/linux/util.go b/agent/pkg/go-sysinfo/providers/linux/util.go new file mode 100644 index 0000000..8d9c27d --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/util.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io/ioutil" + "strconv" +) + +// parseKeyValue parses key/val pairs separated by the provided separator from +// each line in content and invokes the callback. White-space is trimmed from +// val. Empty lines are ignored. All non-empty lines must contain the separator, +// otherwise an error is returned. An error returned by the callback stops +// parsing and is propagated. +func parseKeyValue(content []byte, separator byte, callback func(key, value []byte) error) error { + var line []byte + + for len(content) > 0 { + line, content, _ = bytes.Cut(content, []byte{'\n'}) + if len(line) == 0 { + continue + } + + key, value, ok := bytes.Cut(line, []byte{separator}) + if !ok { + return fmt.Errorf("separator %q not found", separator) + } + + if err := callback(key, bytes.TrimSpace(value)); err != nil { + return err + } + } + + return nil +} + +func findValue(filename, separator, key string) (string, error) { + content, err := ioutil.ReadFile(filename) + if err != nil { + return "", err + } + + var line []byte + sc := bufio.NewScanner(bytes.NewReader(content)) + for sc.Scan() { + if bytes.HasPrefix(sc.Bytes(), []byte(key)) { + line = sc.Bytes() + break + } + } + if len(line) == 0 { + return "", fmt.Errorf("%v not found", key) + } + + parts := bytes.SplitN(line, []byte(separator), 2) + if len(parts) != 2 { + return "", fmt.Errorf("unexpected line format for '%v'", string(line)) + } + + return string(bytes.TrimSpace(parts[1])), nil +} + +func decodeBitMap(s string, lookupName func(int) string) ([]string, error) { + mask, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return nil, err + } + + var names []string + for i := 0; i < 64; i++ { + bit := mask & (1 << uint(i)) + if bit > 0 { + names = append(names, lookupName(i)) + } + } + + return names, nil +} + +func parseBytesOrNumber(data []byte) (uint64, error) { + parts := bytes.Fields(data) + + if len(parts) == 0 { + return 0, errors.New("empty value") + } + + num, err := strconv.ParseUint(string(parts[0]), 10, 64) + if err != nil { + return 0, fmt.Errorf("failed to parse value: %w", err) + } + + var multiplier uint64 = 1 + if len(parts) >= 2 { + switch string(parts[1]) { + case "kB": + multiplier = 1024 + default: + return 0, fmt.Errorf("unhandled unit %v", string(parts[1])) + } + } + + return num * multiplier, nil +} diff --git a/agent/pkg/go-sysinfo/providers/linux/util_test.go b/agent/pkg/go-sysinfo/providers/linux/util_test.go new file mode 100644 index 0000000..388ec8b --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/util_test.go @@ -0,0 +1,162 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseKeyValueNoEOL(t *testing.T) { + vals := [][2]string{} + err := parseKeyValue([]byte( + "Name: zsh\nUmask: 0022\nState: S (sleeping)\nUid: 1000 1000 1000 1000", + ), ':', func(key, value []byte) error { + vals = append(vals, [2]string{string(key), string(value)}) + return nil + }) + assert.NoError(t, err) + + assert.Equal(t, [][2]string{ + {"Name", "zsh"}, + {"Umask", "0022"}, + {"State", "S (sleeping)"}, + {"Uid", "1000\t1000\t1000\t1000"}, + }, vals) +} + +func TestParseKeyValueEmptyLine(t *testing.T) { + vals := [][2]string{} + err := parseKeyValue([]byte( + "Name: zsh\nUmask: 0022\nState: S (sleeping)\n\nUid: 1000 1000 1000 1000", + ), ':', func(key, value []byte) error { + vals = append(vals, [2]string{string(key), string(value)}) + return nil + }) + assert.NoError(t, err) + + assert.Equal(t, [][2]string{ + {"Name", "zsh"}, + {"Umask", "0022"}, + {"State", "S (sleeping)"}, + {"Uid", "1000\t1000\t1000\t1000"}, + }, vals) +} + +func TestParseKeyValueEOL(t *testing.T) { + vals := [][2]string{} + err := parseKeyValue([]byte( + "Name: zsh\nUmask: 0022\nState: S (sleeping)\nUid: 1000 1000 1000 1000\n", + ), ':', func(key, value []byte) error { + vals = append(vals, [2]string{string(key), string(value)}) + return nil + }) + assert.NoError(t, err) + + assert.Equal(t, [][2]string{ + {"Name", "zsh"}, + {"Umask", "0022"}, + {"State", "S (sleeping)"}, + {"Uid", "1000\t1000\t1000\t1000"}, + }, vals) +} + +// from cat /proc/$$/status +var testProcStatus = []byte(`Name: zsh +Umask: 0022 +State: S (sleeping) +Tgid: 4023363 +Ngid: 0 +Pid: 4023363 +PPid: 4023357 +TracerPid: 0 +Uid: 1000 1000 1000 1000 +Gid: 1000 1000 1000 1000 +FDSize: 64 +Groups: 24 25 27 29 30 44 46 102 109 112 116 119 131 998 1000 +NStgid: 4023363 +NSpid: 4023363 +NSpgid: 4023363 +NSsid: 4023363 +VmPeak: 15596 kB +VmSize: 15144 kB +VmLck: 0 kB +VmPin: 0 kB +VmHWM: 9060 kB +VmRSS: 8716 kB +RssAnon: 3828 kB +RssFile: 4888 kB +RssShmem: 0 kB +VmData: 3500 kB +VmStk: 328 kB +VmExe: 600 kB +VmLib: 2676 kB +VmPTE: 68 kB +VmSwap: 0 kB +HugetlbPages: 0 kB +CoreDumping: 0 +THP_enabled: 1 +Threads: 1 +SigQ: 0/126683 +SigPnd: 0000000000000000 +ShdPnd: 0000000000000000 +SigBlk: 0000000000000002 +SigIgn: 0000000000384000 +SigCgt: 0000000008013003 +CapInh: 0000000000000000 +CapPrm: 0000000000000000 +CapEff: 0000000000000000 +CapBnd: 000001ffffffffff +CapAmb: 0000000000000000 +NoNewPrivs: 0 +Seccomp: 0 +Seccomp_filters: 0 +Speculation_Store_Bypass: thread vulnerable +Cpus_allowed: fff +Cpus_allowed_list: 0-11 +Mems_allowed: 00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000001 +Mems_allowed_list: 0 +voluntary_ctxt_switches: 223 +nonvoluntary_ctxt_switches: 25 +`) + +func BenchmarkParseKeyValue(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = parseKeyValue(testProcStatus, ':', func(key, value 
[]byte) error { + return nil + }) + } +} + +func FuzzParseKeyValue(f *testing.F) { + testcases := []string{ + "no_separator", + "no_value:", + "empty_value: ", + "normal: 223", + } + for _, tc := range testcases { + f.Add(tc) + } + f.Fuzz(func(t *testing.T, orig string) { + _ = parseKeyValue([]byte(orig), ':', func(key, value []byte) error { + return nil + }) + }) +} diff --git a/agent/pkg/go-sysinfo/providers/linux/vmstat.go b/agent/pkg/go-sysinfo/providers/linux/vmstat.go new file mode 100644 index 0000000..9110e23 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/vmstat.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "fmt" + "reflect" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +// vmstatTagToFieldIndex contains a mapping of json struct tags to struct field indices. +var vmstatTagToFieldIndex = make(map[string]int) + +func init() { + var vmstat types.VMStatInfo + val := reflect.ValueOf(vmstat) + typ := reflect.TypeOf(vmstat) + + for i := 0; i < val.NumField(); i++ { + field := typ.Field(i) + if tag := field.Tag.Get("json"); tag != "" { + vmstatTagToFieldIndex[tag] = i + } + } +} + +// parseVMStat parses the contents of /proc/vmstat. +func parseVMStat(content []byte) (*types.VMStatInfo, error) { + var vmStat types.VMStatInfo + refValues := reflect.ValueOf(&vmStat).Elem() + + err := parseKeyValue(content, ' ', func(key, value []byte) error { + // turn our []byte value into an int + val, err := parseBytesOrNumber(value) + if err != nil { + return fmt.Errorf("failed to parse %v value of %v: %w", string(key), string(value), err) + } + + idx, ok := vmstatTagToFieldIndex[string(key)] + if !ok { + return nil + } + + sval := refValues.Field(idx) + + if sval.CanSet() { + sval.SetUint(val) + } + return nil + }) + + return &vmStat, err +} diff --git a/agent/pkg/go-sysinfo/providers/linux/vmstat_test.go b/agent/pkg/go-sysinfo/providers/linux/vmstat_test.go new file mode 100644 index 0000000..1e44cb9 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/linux/vmstat_test.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +var rawInput = ` +nr_free_pages 50545 +nr_zone_inactive_anon 66 +nr_zone_active_anon 26799 +nr_zone_inactive_file 31849 +nr_zone_active_file 94164 +nr_zone_unevictable 0 +nr_zone_write_pending 7 +nr_mlock 0 +nr_page_table_pages 1225 +nr_kernel_stack 2496 +nr_bounce 0 +nr_zspages 0 +nr_free_cma 0 +numa_hit 44470329 +numa_miss 0 +numa_foreign 0 +numa_interleave 16296 +numa_local 44470329 +numa_other 0 +nr_inactive_anon 66 +nr_active_anon 26799 +nr_inactive_file 31849 +nr_active_file 94164 +nr_unevictable 0 +nr_slab_reclaimable 31763 +nr_slab_unreclaimable 10329 +nr_isolated_anon 0 +nr_isolated_file 0 +workingset_refault 302914 +workingset_activate 108959 +workingset_nodereclaim 6422 +nr_anon_pages 26218 +nr_mapped 8641 +nr_file_pages 126182 +nr_dirty 7 +nr_writeback 0 +nr_writeback_temp 0 +nr_shmem 169 +nr_shmem_hugepages 0 +nr_shmem_pmdmapped 0 +nr_anon_transparent_hugepages 0 +nr_unstable 0 +nr_vmscan_write 35 +nr_vmscan_immediate_reclaim 9832 +nr_dirtied 7188920 +nr_written 6479005 +nr_dirty_threshold 31736 +nr_dirty_background_threshold 15848 +pgpgin 17010697 +pgpgout 27734292 +pswpin 0 +pswpout 0 +pgalloc_dma 241378 +pgalloc_dma32 45788683 +pgalloc_normal 0 +pgalloc_movable 0 +allocstall_dma 0 +allocstall_dma32 0 +allocstall_normal 5 +allocstall_movable 8 +pgskip_dma 0 +pgskip_dma32 0 +pgskip_normal 0 +pgskip_movable 0 +pgfree 46085578 +pgactivate 2475069 +pgdeactivate 636658 +pglazyfree 9426 +pgfault 46777498 +pgmajfault 19204 +pglazyfreed 0 +pgrefill 707817 +pgsteal_kswapd 3798890 +pgsteal_direct 1466 +pgscan_kswapd 3868525 +pgscan_direct 1483 +pgscan_direct_throttle 0 +zone_reclaim_failed 0 +pginodesteal 1710 +slabs_scanned 8348560 +kswapd_inodesteal 3142001 +kswapd_low_wmark_hit_quickly 541 +kswapd_high_wmark_hit_quickly 332 +pageoutrun 1492 +pgrotated 29725 +drop_pagecache 0 +drop_slab 0 +oom_kill 0 +numa_pte_updates 0 +numa_huge_pte_updates 0 +numa_hint_faults 0 +numa_hint_faults_local 0 +numa_pages_migrated 0 +pgmigrate_success 4539 +pgmigrate_fail 156 +compact_migrate_scanned 9331 +compact_free_scanned 136266 +compact_isolated 9407 +compact_stall 2 +compact_fail 0 +compact_success 2 +compact_daemon_wake 21 +compact_daemon_migrate_scanned 8311 +compact_daemon_free_scanned 107086 +htlb_buddy_alloc_success 0 +htlb_buddy_alloc_fail 0 +unevictable_pgs_culled 19 +unevictable_pgs_scanned 0 +unevictable_pgs_rescued 304 +unevictable_pgs_mlocked 304 +unevictable_pgs_munlocked 304 +unevictable_pgs_cleared 0 +unevictable_pgs_stranded 0 +thp_fault_alloc 2 +thp_fault_fallback 0 +thp_collapse_alloc 2 +thp_collapse_alloc_failed 0 +thp_file_alloc 0 +thp_file_mapped 0 +thp_split_page 0 +thp_split_page_failed 0 +thp_deferred_split_page 4 +thp_split_pmd 1 +thp_split_pud 0 +thp_zero_page_alloc 0 +thp_zero_page_alloc_failed 0 +thp_swpout 0 +thp_swpout_fallback 0 +balloon_inflate 0 +balloon_deflate 0 +balloon_migrate 0 +swap_ra 0 +swap_ra_hit 0 +` + +func TestVmStatParse(t *testing.T) { + data, err := parseVMStat([]byte(rawInput)) + if err != nil { + t.Fatal(err) + } + // Check a few values + 
assert.Equal(t, uint64(8348560), data.SlabsScanned) + assert.Equal(t, uint64(0), data.SwapRa) + assert.Equal(t, uint64(108959), data.WorkingsetActivate) +} diff --git a/agent/pkg/go-sysinfo/providers/shared/fqdn.go b/agent/pkg/go-sysinfo/providers/shared/fqdn.go new file mode 100644 index 0000000..8cba7bc --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/shared/fqdn.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux || darwin || aix + +package shared + +import ( + "fmt" + "net" + "os" + "strings" +) + +// FQDN attempts to lookup the host's fully-qualified domain name and returns it. +// It does so using the following algorithm: +// +// 1. It gets the hostname from the OS. If this step fails, it returns an error. +// +// 2. It tries to perform a CNAME DNS lookup for the hostname. If this succeeds, it +// returns the CNAME (after trimming any trailing period) as the FQDN. +// +// 3. It tries to perform an IP lookup for the hostname. If this succeeds, it tries +// to perform a reverse DNS lookup on the returned IPs and returns the first +// successful result (after trimming any trailing period) as the FQDN. +// +// 4. If steps 2 and 3 both fail, an empty string is returned as the FQDN along with +// errors from those steps. +func FQDN() (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", fmt.Errorf("could not get hostname to look for FQDN: %w", err) + } + + return fqdn(hostname) +} + +func fqdn(hostname string) (string, error) { + var errs error + cname, err := net.LookupCNAME(hostname) + if err != nil { + errs = fmt.Errorf("could not get FQDN, all methods failed: failed looking up CNAME: %w", + err) + } + if cname != "" { + return strings.ToLower(strings.TrimSuffix(cname, ".")), nil + } + + ips, err := net.LookupIP(hostname) + if err != nil { + errs = fmt.Errorf("%s: failed looking up IP: %w", errs, err) + } + + for _, ip := range ips { + names, err := net.LookupAddr(ip.String()) + if err != nil || len(names) == 0 { + continue + } + return strings.ToLower(strings.TrimSuffix(names[0], ".")), nil + } + + return "", errs +} diff --git a/agent/pkg/go-sysinfo/providers/shared/fqdn_test.go b/agent/pkg/go-sysinfo/providers/shared/fqdn_test.go new file mode 100644 index 0000000..090c96f --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/shared/fqdn_test.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux || darwin + +package shared + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFQDN(t *testing.T) { + tests := map[string]struct { + osHostname string + expectedFQDN string + expectedErrRegex string + }{ + // This test case depends on network, particularly DNS, + // being available. If it starts to fail often enough + // due to occasional network/DNS unavailability, we should + // probably just delete this test case. + "long_real_hostname": { + osHostname: "elastic.co", + expectedFQDN: "elastic.co", + expectedErrRegex: "", + }, + "long_nonexistent_hostname": { + osHostname: "foo.bar.elastic.co", + expectedFQDN: "", + expectedErrRegex: makeErrorRegex("foo.bar.elastic.co"), + }, + "short_nonexistent_hostname": { + osHostname: "foobarbaz", + expectedFQDN: "", + expectedErrRegex: makeErrorRegex("foobarbaz"), + }, + "long_mixed_case_hostname": { + osHostname: "eLaSTic.co", + expectedFQDN: "elastic.co", + expectedErrRegex: "", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actualFQDN, err := fqdn(test.osHostname) + require.Equal(t, test.expectedFQDN, actualFQDN) + + if test.expectedErrRegex == "" { + require.Nil(t, err) + } else { + require.Regexp(t, test.expectedErrRegex, err.Error()) + } + }) + } +} + +func makeErrorRegex(osHostname string) string { + return fmt.Sprintf( + "could not get FQDN, all methods failed: "+ + "failed looking up CNAME: lookup %s.*: "+ + "failed looking up IP: lookup %s.*", + osHostname, + osHostname, + ) +} diff --git a/agent/pkg/go-sysinfo/providers/shared/network.go b/agent/pkg/go-sysinfo/providers/shared/network.go new file mode 100644 index 0000000..fa0ffac --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/shared/network.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
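+
+// Note on the helpers below: Network returns the string form of every
+// interface address together with the non-empty hardware (MAC) addresses,
+// while NamedNetwork looks up a single interface by name and only reports an
+// address whose prefix length is /24, so an interface without a /24 address
+// yields an empty ip.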
+ +package shared + +import ( + "net" + "strings" +) + +func Network() (ips, macs []string, err error) { + ifcs, err := net.Interfaces() + if err != nil { + return nil, nil, err + } + + ips = make([]string, 0, len(ifcs)) + macs = make([]string, 0, len(ifcs)) + for _, ifc := range ifcs { + addrs, err := ifc.Addrs() + if err != nil { + return nil, nil, err + } + for _, addr := range addrs { + ips = append(ips, addr.String()) + } + + mac := ifc.HardwareAddr.String() + if mac != "" { + macs = append(macs, mac) + } + } + + return ips, macs, nil +} + +func NamedNetwork(name string) (ip, mac string, err error) { + ifcs, err := net.Interfaces() + if err != nil { + return "", "", err + } + + for _, ifc := range ifcs { + if ifc.Name == name { + addrs, err := ifc.Addrs() + + var i int + for i = 0; i < len(addrs); i++ { + split := strings.Split(addrs[i].String(), "/") + if len(split) > 1 && split[1] == "24" { + //if i != 0 { + // ip = ip + "," + addrs[i].String() + //} + //ip = addrs[i].String() + ip = split[0] + } + } + + if err != nil { + return "", "", err + } + + mac = ifc.HardwareAddr.String() + } + } + + return +} diff --git a/agent/pkg/go-sysinfo/providers/shared/network_test.go b/agent/pkg/go-sysinfo/providers/shared/network_test.go new file mode 100644 index 0000000..ce691b2 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/shared/network_test.go @@ -0,0 +1,11 @@ +package shared + +import ( + "fmt" + "testing" +) + +func TestNamedNetwork(t *testing.T) { + ip, mac, _ := NamedNetwork("以太网 4") + fmt.Print(ip + mac) +} diff --git a/agent/pkg/go-sysinfo/providers/windows/arch_windows.go b/agent/pkg/go-sysinfo/providers/windows/arch_windows.go new file mode 100644 index 0000000..0edfc4d --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/arch_windows.go @@ -0,0 +1,31 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + windows "github.com/elastic/go-windows" +) + +func Architecture() (string, error) { + systemInfo, err := windows.GetNativeSystemInfo() + if err != nil { + return "", err + } + + return systemInfo.ProcessorArchitecture.String(), nil +} diff --git a/agent/pkg/go-sysinfo/providers/windows/boottime_windows.go b/agent/pkg/go-sysinfo/providers/windows/boottime_windows.go new file mode 100644 index 0000000..e04d9a4 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/boottime_windows.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "time" + + "golang.org/x/sys/windows" +) + +func BootTime() (time.Time, error) { + bootTime := time.Now().Add(-1 * windows.DurationSinceBoot()) + + // According to GetTickCount64, the resolution of the value is limited to + // the resolution of the system timer, which is typically in the range of + // 10 milliseconds to 16 milliseconds. So this will round the value to the + // nearest second to not mislead anyone about the precision of the value + // and to provide a stable value. + bootTime = bootTime.Round(time.Second) + return bootTime, nil +} diff --git a/agent/pkg/go-sysinfo/providers/windows/boottime_windows_test.go b/agent/pkg/go-sysinfo/providers/windows/boottime_windows_test.go new file mode 100644 index 0000000..23d1872 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/boottime_windows_test.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBootTime(t *testing.T) { + bootTime, err := BootTime() + require.NoError(t, err) + + // There should be no sub-second precision in the time. + assert.Equal(t, 0, bootTime.Nanosecond()) +} diff --git a/agent/pkg/go-sysinfo/providers/windows/device_windows.go b/agent/pkg/go-sysinfo/providers/windows/device_windows.go new file mode 100644 index 0000000..372f125 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/device_windows.go @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
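+
+// The code below resolves NT device paths (for example the output of
+// GetProcessImageFileName, \Device\HarddiskVolume2\Windows\...) back to
+// drive-letter paths: QueryDosDevice is queried for every logical drive and
+// the returned device prefix is matched against the path, while \Device\Mup
+// paths for unmapped network filesystems fall back to UNC \\server\share form.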
+ +package windows + +import ( + "errors" + "fmt" + "strings" + "unsafe" + + "golang.org/x/sys/windows" +) + +const ( + // DeviceMup is the device used for unmounted network filesystems + DeviceMup = "\\device\\mup" + + // LANManRedirector is an string that appears in mounted network filesystems + LANManRedirector = "lanmanredirector" +) + +var ( + // ErrNoDevice is the error returned by DevicePathToDrivePath when + // an invalid device-path is supplied. + ErrNoDevice = errors.New("not a device path") + + // ErrDeviceNotFound is the error returned by DevicePathToDrivePath when + // a path pointing to an unmapped device is passed. + ErrDeviceNotFound = errors.New("logical device not found") +) + +type deviceProvider interface { + GetLogicalDrives() (uint32, error) + QueryDosDevice(*uint16, *uint16, uint32) (uint32, error) +} + +type deviceMapper struct { + deviceProvider +} + +type winapiDeviceProvider struct{} + +type testingDeviceProvider map[byte]string + +func newDeviceMapper() deviceMapper { + return deviceMapper{ + deviceProvider: winapiDeviceProvider{}, + } +} + +func fixNetworkDrivePath(device string) string { + // For a VirtualBox share: + // device=\device\vboxminirdr\;z:\vboxsvr\share + // path=\device\vboxminirdr\vboxsvr\share + // + // For a network share: + // device=\device\lanmanredirector\;q:nnnnnnn\server\share + // path=\device\mup\server\share + + semicolonPos := strings.IndexByte(device, ';') + colonPos := strings.IndexByte(device, ':') + if semicolonPos == -1 || colonPos != semicolonPos+2 { + return device + } + pathStart := strings.IndexByte(device[colonPos+1:], '\\') + if pathStart == -1 { + return device + } + dev := device[:semicolonPos] + path := device[colonPos+pathStart+1:] + n := len(dev) + if n > 0 && dev[n-1] == '\\' { + dev = dev[:n-1] + } + return dev + path +} + +func (mapper *deviceMapper) getDevice(driveLetter byte) (string, error) { + driveBuf := [3]uint16{uint16(driveLetter), ':', 0} + + for bufSize := 64; bufSize <= 1024; bufSize *= 2 { + deviceBuf := make([]uint16, bufSize) + n, err := mapper.QueryDosDevice(&driveBuf[0], &deviceBuf[0], uint32(len(deviceBuf))) + if err != nil { + if err == windows.ERROR_INSUFFICIENT_BUFFER { + continue + } + return "", err + } + return windows.UTF16ToString(deviceBuf[:n]), nil + } + return "", windows.ERROR_INSUFFICIENT_BUFFER +} + +func (mapper *deviceMapper) DevicePathToDrivePath(path string) (string, error) { + pathLower := strings.ToLower(path) + isMUP := strings.Index(pathLower, DeviceMup) == 0 + mask, err := mapper.GetLogicalDrives() + if err != nil { + return "", fmt.Errorf("GetLogicalDrives: %w", err) + } + + for bit := uint32(0); mask != 0 && bit < uint32('Z'-'A'+1); bit++ { + if mask&(1< \\server\share\path + if isMUP { + return "\\" + path[len(DeviceMup):], nil + } + return "", ErrDeviceNotFound +} + +func (winapiDeviceProvider) GetLogicalDrives() (uint32, error) { + return windows.GetLogicalDrives() +} + +func (winapiDeviceProvider) QueryDosDevice(name *uint16, buf *uint16, length uint32) (uint32, error) { + return windows.QueryDosDevice(name, buf, length) +} + +func (m testingDeviceProvider) GetLogicalDrives() (mask uint32, err error) { + for drive := range m { + mask |= 1 << uint32(drive-'A') + } + return mask, nil +} + +func ptrOffset(ptr *uint16, off uint32) *uint16 { + return (*uint16)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) + uintptr(off*2))) +} + +func (m testingDeviceProvider) QueryDosDevice(nameW *uint16, buf *uint16, length uint32) (uint32, error) { + drive := byte(*nameW) + if 
byte(*ptrOffset(nameW, 1)) != ':' { + return 0, errors.New("not a drive") + } + if *ptrOffset(nameW, 2) != 0 { + return 0, errors.New("drive not terminated") + } + path, ok := m[drive] + if !ok { + return 0, fmt.Errorf("drive %c not found", drive) + } + n := uint32(len(path)) + if n+2 > length { + return 0, windows.ERROR_INSUFFICIENT_BUFFER + } + for i := uint32(0); i < n; i++ { + *ptrOffset(buf, i) = uint16(path[i]) + } + *ptrOffset(buf, n) = 0 + *ptrOffset(buf, n+1) = 0 + return n + 2, nil +} diff --git a/agent/pkg/go-sysinfo/providers/windows/device_windows_test.go b/agent/pkg/go-sysinfo/providers/windows/device_windows_test.go new file mode 100644 index 0000000..0e8bc90 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/device_windows_test.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDeviceMapper(t *testing.T) { + devMapper := deviceMapper{ + deviceProvider: testingDeviceProvider(map[byte]string{ + 'A': `\Device\Floppy0`, + 'B': `\Device\Floppy1`, + 'C': `\Device\Harddisk0Volume2`, + 'D': `\Device\Harddisk1Volume1`, + 'E': `\Device\Cdrom0`, + // Virtualbox-style share + 'W': `\Device\Share;w:\dataserver\programs`, + // Network share + 'Z': `\Device\LANManRedirector;z:01234567812313123\officeserver\documents`, + }), + } + for testIdx, testCase := range []struct { + devicePath, expected string + }{ + {`\DEVICE\FLOPPY0\README.TXT`, `A:\README.TXT`}, + {`\Device\cdrom0\autorun.INF`, `E:\autorun.INF`}, + {`\Device\Harddisk0Volume2\WINDOWS\System32\drivers\etc\hosts`, `C:\WINDOWS\System32\drivers\etc\hosts`}, + {`\Device\share\DATASERVER\PROGRAMS\elastic\packetbeat\PACKETBEAT.EXE`, `W:\elastic\packetbeat\PACKETBEAT.EXE`}, + {`\Device\MUP\OfficeServer\Documents\report.pdf`, `Z:\report.pdf`}, + {`\Device\share\othershare\files\run.EXE`, ``}, + {`\Device\MUP\networkserver\share\.git`, `\\networkserver\share\.git`}, + {`\Device\Harddisk1Volume1`, `D:\`}, + {`\Device\Harddisk1Volume1\`, `D:\`}, + {`\Device`, ``}, + {`C:\windows\calc.exe`, ``}, + } { + msg := fmt.Sprintf("test case #%d: %v", testIdx, testCase) + path, err := devMapper.DevicePathToDrivePath(testCase.devicePath) + if err == nil { + assert.Equal(t, testCase.expected, path, msg) + } else { + if len(testCase.expected) != 0 { + t.Fatal(err, msg) + continue + } + assert.Equal(t, testCase.expected, path, msg) + } + } +} diff --git a/agent/pkg/go-sysinfo/providers/windows/doc.go b/agent/pkg/go-sysinfo/providers/windows/doc.go new file mode 100644 index 0000000..fa35194 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package windows implements the HostProvider and ProcessProvider interfaces +// for providing information about Windows. +package windows diff --git a/agent/pkg/go-sysinfo/providers/windows/helpers_windows.go b/agent/pkg/go-sysinfo/providers/windows/helpers_windows.go new file mode 100644 index 0000000..38940ff --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/helpers_windows.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "fmt" + + syswin "golang.org/x/sys/windows" +) + +// sidToString wraps the `String()` functions used to return SID strings in golang.org/x/sys +// These can return an error or no error, depending on the release. +func sidToString(strFunc *syswin.SID) (string, error) { + switch sig := (interface{})(strFunc).(type) { + case fmt.Stringer: + return sig.String(), nil + case errString: + return sig.String() + default: + return "", fmt.Errorf("missing or unexpected String() function signature for %#v", sig) + } +} + +type errString interface { + String() (string, error) +} diff --git a/agent/pkg/go-sysinfo/providers/windows/host_windows.go b/agent/pkg/go-sysinfo/providers/windows/host_windows.go new file mode 100644 index 0000000..a1ceeeb --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/host_windows.go @@ -0,0 +1,221 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "errors" + "fmt" + "os" + "strings" + "syscall" + "time" + + "github.com/joeshaw/multierror" + + stdwindows "golang.org/x/sys/windows" + + windows "github.com/elastic/go-windows" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/internal/registry" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/providers/shared" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +func init() { + registry.Register(windowsSystem{}) +} + +type windowsSystem struct{} + +func (s windowsSystem) Host() (types.Host, error) { + return newHost() +} + +type host struct { + info types.HostInfo +} + +func (h *host) Info() types.HostInfo { + return h.info +} + +func (h *host) CPUTime() (types.CPUTimes, error) { + idle, kernel, user, err := windows.GetSystemTimes() + if err != nil { + return types.CPUTimes{}, err + } + + return types.CPUTimes{ + System: kernel, + User: user, + Idle: idle, + }, nil +} + +func (h *host) Memory() (*types.HostMemoryInfo, error) { + mem, err := windows.GlobalMemoryStatusEx() + if err != nil { + return nil, err + } + + return &types.HostMemoryInfo{ + Total: mem.TotalPhys, + Used: mem.TotalPhys - mem.AvailPhys, + Free: mem.AvailPhys, + Available: mem.AvailPhys, + VirtualTotal: mem.TotalPageFile, + VirtualUsed: mem.TotalPageFile - mem.AvailPageFile, + VirtualFree: mem.AvailPageFile, + }, nil +} + +func (h *host) FQDN() (string, error) { + fqdn, err := getComputerNameEx(stdwindows.ComputerNamePhysicalDnsFullyQualified) + if err != nil { + return "", fmt.Errorf("could not get windows FQDN: %s", err) + } + + return strings.ToLower(strings.TrimSuffix(fqdn, ".")), nil +} + +func newHost() (*host, error) { + h := &host{} + r := &reader{} + r.architecture(h) + r.bootTime(h) + r.hostname(h) + r.network(h) + r.kernelVersion(h) + r.os(h) + r.time(h) + r.uniqueID(h) + return h, r.Err() +} + +type reader struct { + errs []error +} + +func (r *reader) addErr(err error) bool { + if err != nil { + if !errors.Is(err, types.ErrNotImplemented) { + r.errs = append(r.errs, err) + } + return true + } + return false +} + +func (r *reader) Err() error { + if len(r.errs) > 0 { + return &multierror.MultiError{Errors: r.errs} + } + return nil +} + +func (r *reader) architecture(h *host) { + v, err := Architecture() + if r.addErr(err) { + return + } + h.info.Architecture = v +} + +func (r *reader) bootTime(h *host) { + v, err := BootTime() + if r.addErr(err) { + return + } + h.info.BootTime = v +} + +func (r *reader) hostname(h *host) { + v, err := os.Hostname() + if r.addErr(err) { + return + } + h.info.Hostname = strings.ToLower(v) +} + +func getComputerNameEx(name uint32) (string, error) { + size := uint32(64) + + for { + buff := make([]uint16, size) + err := stdwindows.GetComputerNameEx( + name, &buff[0], &size) + if err == nil { + return syscall.UTF16ToString(buff[:size]), nil + } + + // ERROR_MORE_DATA means buff is too small and size is set to the + // number of bytes needed to store the FQDN. For details, see + // https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getcomputernameexw#return-value + if errors.Is(err, syscall.ERROR_MORE_DATA) { + // Safeguard to avoid an infinite loop. + if size <= uint32(len(buff)) { + return "", fmt.Errorf( + "windows.GetComputerNameEx returned ERROR_MORE_DATA, " + + "but data size should fit into buffer") + } else { + // Grow the buffer and try again. 
+ buff = make([]uint16, size) + continue + } + } + + return "", fmt.Errorf("could not get windows FQDN: could not get windows.ComputerNamePhysicalDnsFullyQualified: %w", err) + } +} + +func (r *reader) network(h *host) { + ips, macs, err := shared.Network() + if r.addErr(err) { + return + } + h.info.IPs = ips + h.info.MACs = macs +} + +func (r *reader) kernelVersion(h *host) { + v, err := KernelVersion() + if r.addErr(err) { + return + } + h.info.KernelVersion = v +} + +func (r *reader) os(h *host) { + v, err := OperatingSystem() + if r.addErr(err) { + return + } + h.info.OS = v +} + +func (r *reader) time(h *host) { + h.info.Timezone, h.info.TimezoneOffsetSec = time.Now().Zone() +} + +func (r *reader) uniqueID(h *host) { + v, err := MachineID() + if r.addErr(err) { + return + } + h.info.UniqueID = v +} diff --git a/agent/pkg/go-sysinfo/providers/windows/host_windows_test.go b/agent/pkg/go-sysinfo/providers/windows/host_windows_test.go new file mode 100644 index 0000000..6dbc076 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/host_windows_test.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "encoding/json" + "testing" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/internal/registry" +) + +var _ registry.HostProvider = windowsSystem{} + +func TestHost(t *testing.T) { + host, err := windowsSystem{}.Host() + if err != nil { + t.Logf("could not get all host info: %v\n", err) + } + + info := host.Info() + data, _ := json.MarshalIndent(info, "", " ") + t.Log(string(data)) +} diff --git a/agent/pkg/go-sysinfo/providers/windows/kernel_windows.go b/agent/pkg/go-sysinfo/providers/windows/kernel_windows.go new file mode 100644 index 0000000..c295c79 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/kernel_windows.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package windows + +import ( + windows "github.com/elastic/go-windows" +) + +const windowsKernelExe = `C:\Windows\System32\ntoskrnl.exe` + +func KernelVersion() (string, error) { + versionData, err := windows.GetFileVersionInfo(windowsKernelExe) + if err != nil { + return "", err + } + + fileVersion, err := versionData.QueryValue("FileVersion") + if err == nil { + return fileVersion, nil + } + + // Make a second attempt through the fixed version info. + info, err := versionData.FixedFileInfo() + if err != nil { + return "", err + } + return info.ProductVersion(), nil +} diff --git a/agent/pkg/go-sysinfo/providers/windows/machineid_windows.go b/agent/pkg/go-sysinfo/providers/windows/machineid_windows.go new file mode 100644 index 0000000..0c69c89 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/machineid_windows.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "fmt" + + "golang.org/x/sys/windows/registry" +) + +func MachineID() (string, error) { + return getMachineGUID() +} + +func getMachineGUID() (string, error) { + const key = registry.LOCAL_MACHINE + const path = `SOFTWARE\Microsoft\Cryptography` + const name = "MachineGuid" + + k, err := registry.OpenKey(key, path, registry.READ|registry.WOW64_64KEY) + if err != nil { + return "", fmt.Errorf(`failed to open HKLM\%v: %w`, path, err) + } + defer k.Close() + + guid, _, err := k.GetStringValue(name) + if err != nil { + return "", fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) + } + + return guid, nil +} diff --git a/agent/pkg/go-sysinfo/providers/windows/os_windows.go b/agent/pkg/go-sysinfo/providers/windows/os_windows.go new file mode 100644 index 0000000..d7fc055 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/os_windows.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package windows + +import ( + "fmt" + "strconv" + "strings" + + "golang.org/x/sys/windows/registry" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +func OperatingSystem() (*types.OSInfo, error) { + const key = registry.LOCAL_MACHINE + const path = `SOFTWARE\Microsoft\Windows NT\CurrentVersion` + const flags = registry.READ | registry.WOW64_64KEY + + k, err := registry.OpenKey(key, path, flags) + if err != nil { + return nil, fmt.Errorf(`failed to open HKLM\%v: %w`, path, err) + } + defer k.Close() + + osInfo := &types.OSInfo{ + Type: "windows", + Family: "windows", + Platform: "windows", + } + name := "ProductName" + osInfo.Name, _, err = k.GetStringValue(name) + if err != nil { + return nil, fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) + } + + // Newer versions (Win 10 and 2016) have CurrentMajor/CurrentMinor. + major, _, majorErr := k.GetIntegerValue("CurrentMajorVersionNumber") + minor, _, minorErr := k.GetIntegerValue("CurrentMinorVersionNumber") + if majorErr == nil && minorErr == nil { + osInfo.Major = int(major) + osInfo.Minor = int(minor) + osInfo.Version = fmt.Sprintf("%d.%d", major, minor) + } else { + name = "CurrentVersion" + osInfo.Version, _, err = k.GetStringValue(name) + if err != nil { + return nil, fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) + } + parts := strings.SplitN(osInfo.Version, ".", 3) + for i, p := range parts { + switch i { + case 0: + osInfo.Major, _ = strconv.Atoi(p) + case 1: + osInfo.Minor, _ = strconv.Atoi(p) + } + } + } + + name = "CurrentBuild" + currentBuild, _, err := k.GetStringValue(name) + if err != nil { + return nil, fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) + } + osInfo.Build = currentBuild + + // Update Build Revision (optional) + name = "UBR" + updateBuildRevision, _, err := k.GetIntegerValue(name) + if err != nil && err != registry.ErrNotExist { + return nil, fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) + } else { + osInfo.Build = fmt.Sprintf("%v.%d", osInfo.Build, updateBuildRevision) + } + + fixWindows11Naming(currentBuild, osInfo) + + return osInfo, nil +} + +// fixWindows11Naming adjusts the OS name because the ProductName registry value +// was not changed in Windows 11 and still contains Windows 10. If the product +// name contains "Windows 10" and the version is greater than or equal to +// 10.0.22000 then "Windows 10" is replaced with "Windows 11" in the OS name. +// +// https://docs.microsoft.com/en-us/answers/questions/586619/windows-11-build-ver-is-still-10022000194.html +func fixWindows11Naming(currentBuild string, osInfo *types.OSInfo) { + buildNumber, err := strconv.Atoi(currentBuild) + if err != nil { + return + } + + // "Anything above [or equal] 10.0.22000.0 is Win 11. Anything below is Win 10." + if osInfo.Major > 10 || + osInfo.Major == 10 && osInfo.Minor > 0 || + osInfo.Major == 10 && osInfo.Minor == 0 && buildNumber >= 22000 { + osInfo.Name = strings.Replace(osInfo.Name, "Windows 10", "Windows 11", 1) + } +} diff --git a/agent/pkg/go-sysinfo/providers/windows/os_windows_test.go b/agent/pkg/go-sysinfo/providers/windows/os_windows_test.go new file mode 100644 index 0000000..4a5ce45 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/os_windows_test.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "os/exec" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +func TestFixWindows11Naming(t *testing.T) { + testCases := []struct { + osInfo types.OSInfo + expectedName string + }{ + { + osInfo: types.OSInfo{ + Major: 10, + Minor: 0, + Build: "22000", + Name: "Windows 10 Pro", + }, + expectedName: "Windows 11 Pro", + }, + { + osInfo: types.OSInfo{ + Major: 10, + Minor: 0, + Build: "22001", + Name: "Windows 10 Pro", + }, + expectedName: "Windows 11 Pro", + }, + { + osInfo: types.OSInfo{ + Major: 10, + Minor: 1, + Build: "0", + Name: "Windows 10 Pro", + }, + expectedName: "Windows 11 Pro", + }, + { + osInfo: types.OSInfo{ + Major: 11, + Minor: 0, + Build: "0", + Name: "Windows 10 Pro", + }, + expectedName: "Windows 11 Pro", + }, + { + osInfo: types.OSInfo{ + Major: 11, + Minor: 0, + Build: "0", + Name: "Windows 12 Pro", + }, + expectedName: "Windows 12 Pro", + }, + { + osInfo: types.OSInfo{ + Major: 9, + Minor: 0, + Build: "22000", + Name: "Windows 10 Pro", + }, + expectedName: "Windows 10 Pro", + }, + } + + for _, tc := range testCases { + fixWindows11Naming(tc.osInfo.Build, &tc.osInfo) + assert.Equal(t, tc.expectedName, tc.osInfo.Name) + } +} + +func TestOperatingSystemMajorMinor(t *testing.T) { + // User PowerShell to get the expected OS version. + var major, minor int + if stdout, err := exec.Command("powershell.exe", "-c", "[System.Environment]::OSVersion.Version.Major").Output(); err != nil { + t.Fatal(err) + } else if major, err = strconv.Atoi(strings.TrimSpace(string(stdout))); err != nil { + t.Fatal(err) + } + if stdout, err := exec.Command("powershell.exe", "-c", "[System.Environment]::OSVersion.Version.Minor").Output(); err != nil { + t.Fatal(err) + } else if minor, err = strconv.Atoi(strings.TrimSpace(string(stdout))); err != nil { + t.Fatal(err) + } + + // Verify expected output. + osInfo, err := OperatingSystem() + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, major, osInfo.Major) + assert.Equal(t, minor, osInfo.Minor) +} diff --git a/agent/pkg/go-sysinfo/providers/windows/process_windows.go b/agent/pkg/go-sysinfo/providers/windows/process_windows.go new file mode 100644 index 0000000..30f2896 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/process_windows.go @@ -0,0 +1,383 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + "time" + "unsafe" + + syswin "golang.org/x/sys/windows" + + windows "github.com/elastic/go-windows" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +var ( + selfPID = os.Getpid() + devMapper = newDeviceMapper() +) + +func (s windowsSystem) Processes() (procs []types.Process, err error) { + pids, err := windows.EnumProcesses() + if err != nil { + return nil, fmt.Errorf("EnumProcesses: %w", err) + } + procs = make([]types.Process, 0, len(pids)) + var proc types.Process + for _, pid := range pids { + if pid == 0 || pid == 4 { + // The Idle and System processes (PIDs 0 and 4) can never be + // opened by user-level code (see documentation for OpenProcess). + continue + } + + if proc, err = s.Process(int(pid)); err == nil { + procs = append(procs, proc) + } + } + if len(procs) == 0 { + return nil, err + } + return procs, nil +} + +func (s windowsSystem) Process(pid int) (types.Process, error) { + return newProcess(pid) +} + +func (s windowsSystem) Self() (types.Process, error) { + return newProcess(selfPID) +} + +type process struct { + pid int + info types.ProcessInfo +} + +func (p *process) PID() int { + return p.pid +} + +func (p *process) Parent() (types.Process, error) { + info, err := p.Info() + if err != nil { + return nil, err + } + + return newProcess(info.PPID) +} + +func newProcess(pid int) (*process, error) { + p := &process{pid: pid} + if err := p.init(); err != nil { + return nil, err + } + return p, nil +} + +func (p *process) init() error { + handle, err := p.open() + if err != nil { + return err + } + defer syscall.CloseHandle(handle) + + var path string + if imgf, err := windows.GetProcessImageFileName(handle); err == nil { + path, err = devMapper.DevicePathToDrivePath(imgf) + if err != nil { + path = imgf + } + } + + var creationTime, exitTime, kernelTime, userTime syscall.Filetime + if err := syscall.GetProcessTimes(handle, &creationTime, &exitTime, &kernelTime, &userTime); err != nil { + return err + } + + // Try to read the RTL_USER_PROCESS_PARAMETERS struct from the target process + // memory. This can fail due to missing access rights or when we are running + // as a 32bit process in a 64bit system (WOW64). + // Don't make this a fatal error: If it fails, `args` and `cwd` fields will + // be missing. 
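+ // The lookup below queries PROCESS_BASIC_INFORMATION for the PEB address
+ // and parent PID, reads the PEB with ReadProcessMemory to locate the
+ // RTL_USER_PROCESS_PARAMETERS block, and then copies its CommandLine and
+ // CurrentDirectoryPath UTF-16 strings out of the target process.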
+ var args []string + var cwd string + var ppid int + pbi, err := getProcessBasicInformation(syswin.Handle(handle)) + if err == nil { + ppid = int(pbi.InheritedFromUniqueProcessID) + userProcParams, err := getUserProcessParams(syswin.Handle(handle), pbi) + if err == nil { + if argsW, err := readProcessUnicodeString(handle, &userProcParams.CommandLine); err == nil { + args, err = splitCommandline(argsW) + if err != nil { + args = nil + } + } + if cwdW, err := readProcessUnicodeString(handle, &userProcParams.CurrentDirectoryPath); err == nil { + cwd, _, err = windows.UTF16BytesToString(cwdW) + if err != nil { + cwd = "" + } + // Remove trailing separator + cwd = strings.TrimRight(cwd, "\\") + } + } + } + + p.info = types.ProcessInfo{ + Name: filepath.Base(path), + PID: p.pid, + PPID: ppid, + Exe: path, + Args: args, + CWD: cwd, + StartTime: time.Unix(0, creationTime.Nanoseconds()), + } + return nil +} + +func getProcessBasicInformation(handle syswin.Handle) (pbi windows.ProcessBasicInformationStruct, err error) { + var actualSize uint32 + err = syswin.NtQueryInformationProcess(handle, syswin.ProcessBasicInformation, unsafe.Pointer(&pbi), uint32(windows.SizeOfProcessBasicInformationStruct), &actualSize) + if actualSize < uint32(windows.SizeOfProcessBasicInformationStruct) { + return pbi, errors.New("bad size for PROCESS_BASIC_INFORMATION") + } + return pbi, err +} + +func getUserProcessParams(handle syswin.Handle, pbi windows.ProcessBasicInformationStruct) (params windows.RtlUserProcessParameters, err error) { + const is32bitProc = unsafe.Sizeof(uintptr(0)) == 4 + + // Offset of params field within PEB structure. + // This structure is different in 32 and 64 bit. + paramsOffset := 0x20 + if is32bitProc { + paramsOffset = 0x10 + } + + // Read the PEB from the target process memory + pebSize := paramsOffset + 8 + peb := make([]byte, pebSize) + var nRead uintptr + err = syswin.ReadProcessMemory(handle, pbi.PebBaseAddress, &peb[0], uintptr(pebSize), &nRead) + if err != nil { + return params, err + } + if nRead != uintptr(pebSize) { + return params, fmt.Errorf("PEB: short read (%d/%d)", nRead, pebSize) + } + + // Get the RTL_USER_PROCESS_PARAMETERS struct pointer from the PEB + paramsAddr := *(*uintptr)(unsafe.Pointer(&peb[paramsOffset])) + + // Read the RTL_USER_PROCESS_PARAMETERS from the target process memory + paramsBuf := make([]byte, windows.SizeOfRtlUserProcessParameters) + err = syswin.ReadProcessMemory(handle, paramsAddr, ¶msBuf[0], uintptr(windows.SizeOfRtlUserProcessParameters), &nRead) + if err != nil { + return params, err + } + if nRead != uintptr(windows.SizeOfRtlUserProcessParameters) { + return params, fmt.Errorf("RTL_USER_PROCESS_PARAMETERS: short read (%d/%d)", nRead, windows.SizeOfRtlUserProcessParameters) + } + + params = *(*windows.RtlUserProcessParameters)(unsafe.Pointer(¶msBuf[0])) + return params, nil +} + +// read an UTF-16 string from another process memory. Result is an []byte +// with the UTF-16 data. +func readProcessUnicodeString(handle syscall.Handle, s *windows.UnicodeString) ([]byte, error) { + // Allocate an extra UTF-16 null character at the end in case the read string + // is not terminated. + extra := 2 + if s.Size&1 != 0 { + extra = 3 // If size is odd, need 3 nulls to terminate. 
+ } + buf := make([]byte, int(s.Size)+extra) + nRead, err := windows.ReadProcessMemory(handle, s.Buffer, buf[:s.Size]) + if err != nil { + return nil, err + } + if nRead != uintptr(s.Size) { + return nil, fmt.Errorf("unicode string: short read: (%d/%d)", nRead, s.Size) + } + return buf, nil +} + +// Use Windows' CommandLineToArgv API to split an UTF-16 command line string +// into a list of parameters. +func splitCommandline(utf16 []byte) ([]string, error) { + n := len(utf16) + // Discard odd byte + if n&1 != 0 { + n-- + utf16 = utf16[:n] + } + if n == 0 { + return nil, nil + } + terminated := false + for i := 0; i < n && !terminated; i += 2 { + terminated = utf16[i] == 0 && utf16[i+1] == 0 + } + if !terminated { + // Append a null uint16 at the end if terminator is missing + utf16 = append(utf16, 0, 0) + } + var numArgs int32 + argsWide, err := syscall.CommandLineToArgv((*uint16)(unsafe.Pointer(&utf16[0])), &numArgs) + if err != nil { + return nil, err + } + + // Free memory allocated for CommandLineToArgvW arguments. + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(argsWide))) + + args := make([]string, numArgs) + for idx := range args { + args[idx] = syscall.UTF16ToString(argsWide[idx][:]) + } + return args, nil +} + +func (p *process) open() (handle syscall.Handle, err error) { + if p.pid == selfPID { + return syscall.GetCurrentProcess() + } + + // Try different access rights, from broader to more limited. + // PROCESS_VM_READ is needed to get command-line and working directory + // PROCESS_QUERY_LIMITED_INFORMATION is only available in Vista+ + for _, permissions := range [4]uint32{ + syscall.PROCESS_QUERY_INFORMATION | windows.PROCESS_VM_READ, + windows.PROCESS_QUERY_LIMITED_INFORMATION | windows.PROCESS_VM_READ, + syscall.PROCESS_QUERY_INFORMATION, + windows.PROCESS_QUERY_LIMITED_INFORMATION, + } { + if handle, err = syscall.OpenProcess(permissions, false, uint32(p.pid)); err == nil { + break + } + } + return handle, err +} + +func (p *process) Info() (types.ProcessInfo, error) { + return p.info, nil +} + +func (p *process) User() (types.UserInfo, error) { + handle, err := p.open() + if err != nil { + return types.UserInfo{}, fmt.Errorf("OpenProcess failed: %w", err) + } + defer syscall.CloseHandle(handle) + + var accessToken syswin.Token + err = syswin.OpenProcessToken(syswin.Handle(handle), syscall.TOKEN_QUERY, &accessToken) + if err != nil { + return types.UserInfo{}, fmt.Errorf("OpenProcessToken failed: %w", err) + } + defer accessToken.Close() + + tokenUser, err := accessToken.GetTokenUser() + if err != nil { + return types.UserInfo{}, fmt.Errorf("GetTokenUser failed: %w", err) + } + + sid, err := sidToString(tokenUser.User.Sid) + if sid == "" || err != nil { + if err != nil { + return types.UserInfo{}, fmt.Errorf("failed to look up user SID: %w", err) + } + return types.UserInfo{}, errors.New("failed to look up user SID") + } + + tokenGroup, err := accessToken.GetTokenPrimaryGroup() + if err != nil { + return types.UserInfo{}, fmt.Errorf("GetTokenPrimaryGroup failed: %w", err) + } + + gsid, err := sidToString(tokenGroup.PrimaryGroup) + if gsid == "" || err != nil { + if err != nil { + return types.UserInfo{}, fmt.Errorf("failed to look up primary group SID: %w", err) + } + return types.UserInfo{}, errors.New("failed to look up primary group SID") + } + + return types.UserInfo{ + UID: sid, + GID: gsid, + }, nil +} + +func (p *process) Memory() (types.MemoryInfo, error) { + handle, err := p.open() + if err != nil { + return types.MemoryInfo{}, err + } + defer 
syscall.CloseHandle(handle) + + counters, err := windows.GetProcessMemoryInfo(handle) + if err != nil { + return types.MemoryInfo{}, err + } + + return types.MemoryInfo{ + Resident: uint64(counters.WorkingSetSize), + Virtual: uint64(counters.PrivateUsage), + }, nil +} + +func (p *process) CPUTime() (types.CPUTimes, error) { + handle, err := p.open() + if err != nil { + return types.CPUTimes{}, err + } + defer syscall.CloseHandle(handle) + + var creationTime, exitTime, kernelTime, userTime syscall.Filetime + if err := syscall.GetProcessTimes(handle, &creationTime, &exitTime, &kernelTime, &userTime); err != nil { + return types.CPUTimes{}, err + } + + return types.CPUTimes{ + User: windows.FiletimeToDuration(&userTime), + System: windows.FiletimeToDuration(&kernelTime), + }, nil +} + +// OpenHandles returns the number of open handles of the process. +func (p *process) OpenHandleCount() (int, error) { + handle, err := p.open() + if err != nil { + return 0, err + } + defer syscall.CloseHandle(handle) + + count, err := windows.GetProcessHandleCount(handle) + return int(count), err +} diff --git a/agent/pkg/go-sysinfo/providers/windows/process_windows_test.go b/agent/pkg/go-sysinfo/providers/windows/process_windows_test.go new file mode 100644 index 0000000..9d0c106 --- /dev/null +++ b/agent/pkg/go-sysinfo/providers/windows/process_windows_test.go @@ -0,0 +1,27 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package windows + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/internal/registry" +) + +var ( + _ registry.HostProvider = windowsSystem{} + _ registry.ProcessProvider = windowsSystem{} +) diff --git a/agent/pkg/go-sysinfo/system.go b/agent/pkg/go-sysinfo/system.go new file mode 100644 index 0000000..d4bf264 --- /dev/null +++ b/agent/pkg/go-sysinfo/system.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package sysinfo + +import ( + "runtime" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/internal/registry" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" + + // Register host and process providers. + _ "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/providers/aix" + _ "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/providers/darwin" + _ "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/providers/linux" + _ "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/providers/windows" +) + +// Go returns information about the Go runtime. +func Go() types.GoInfo { + return types.GoInfo{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + MaxProcs: runtime.GOMAXPROCS(0), + Version: runtime.Version(), + } +} + +// Host returns information about host on which this process is running. If +// host information collection is not implemented for this platform then +// types.ErrNotImplemented is returned. +// On Darwin (macOS) a types.ErrNotImplemented is returned with cgo disabled. +func Host() (types.Host, error) { + provider := registry.GetHostProvider() + if provider == nil { + return nil, types.ErrNotImplemented + } + return provider.Host() +} + +// Process returns a types.Process object representing the process associated +// with the given PID. The types.Process object can be used to query information +// about the process. If process information collection is not implemented for +// this platform then types.ErrNotImplemented is returned. +func Process(pid int) (types.Process, error) { + provider := registry.GetProcessProvider() + if provider == nil { + return nil, types.ErrNotImplemented + } + return provider.Process(pid) +} + +// Processes return a list of all processes. If process information collection +// is not implemented for this platform then types.ErrNotImplemented is +// returned. +func Processes() ([]types.Process, error) { + provider := registry.GetProcessProvider() + if provider == nil { + return nil, types.ErrNotImplemented + } + return provider.Processes() +} + +// Self return a types.Process object representing this process. If process +// information collection is not implemented for this platform then +// types.ErrNotImplemented is returned. +func Self() (types.Process, error) { + provider := registry.GetProcessProvider() + if provider == nil { + return nil, types.ErrNotImplemented + } + return provider.Self() +} diff --git a/agent/pkg/go-sysinfo/system_test.go b/agent/pkg/go-sysinfo/system_test.go new file mode 100644 index 0000000..2cfb400 --- /dev/null +++ b/agent/pkg/go-sysinfo/system_test.go @@ -0,0 +1,312 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package sysinfo + +import ( + "encoding/json" + "errors" + "io/fs" + "os" + osUser "os/user" + "runtime" + "sort" + "strconv" + "strings" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/go-sysinfo/types" +) + +type ProcessFeatures struct { + ProcessInfo bool + Environment bool + OpenHandleEnumerator bool + OpenHandleCounter bool + Seccomp bool + Capabilities bool +} + +var expectedProcessFeatures = map[string]*ProcessFeatures{ + "darwin": { + ProcessInfo: true, + Environment: true, + OpenHandleEnumerator: false, + OpenHandleCounter: false, + }, + "linux": { + ProcessInfo: true, + Environment: true, + OpenHandleEnumerator: true, + OpenHandleCounter: true, + Seccomp: true, + Capabilities: true, + }, + "windows": { + ProcessInfo: true, + OpenHandleEnumerator: false, + OpenHandleCounter: true, + }, + "aix": { + ProcessInfo: true, + Environment: true, + OpenHandleEnumerator: false, + OpenHandleCounter: false, + }, +} + +func TestProcessFeaturesMatrix(t *testing.T) { + const GOOS = runtime.GOOS + var features ProcessFeatures + + process, err := Self() + if err == types.ErrNotImplemented { + assert.Nil(t, expectedProcessFeatures[GOOS], "unexpected ErrNotImplemented for %v", GOOS) + return + } else if err != nil { + t.Fatal(err) + } + features.ProcessInfo = true + + _, features.Environment = process.(types.Environment) + _, features.OpenHandleEnumerator = process.(types.OpenHandleEnumerator) + _, features.OpenHandleCounter = process.(types.OpenHandleCounter) + _, features.Seccomp = process.(types.Seccomp) + _, features.Capabilities = process.(types.Capabilities) + + assert.Equal(t, expectedProcessFeatures[GOOS], &features) + logAsJSON(t, map[string]interface{}{ + "features": features, + }) +} + +func TestSelf(t *testing.T) { + process, err := Self() + if err == types.ErrNotImplemented { + t.Skip("process provider not implemented on", runtime.GOOS) + } else if err != nil { + t.Fatal(err) + } + assert.EqualValues(t, os.Getpid(), process.PID()) + + if runtime.GOOS == "linux" { + // Do some dummy work to spend user CPU time. + var v int + for i := 0; i < 999999999; i++ { + v += i * i + } + } + + output := map[string]interface{}{} + info, err := process.Info() + if err != nil { + t.Fatal(err) + } + output["process.info"] = info + assert.EqualValues(t, os.Getpid(), info.PID) + assert.Equal(t, os.Args, info.Args) + + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + assert.Equal(t, exe, info.Exe) + + if user, err := process.User(); !errors.Is(err, types.ErrNotImplemented) { + if err != nil { + t.Fatal(err) + } + output["process.user"] = user + + currentUser, err := osUser.Current() + if err != nil { + t.Fatal(err) + } + assert.EqualValues(t, currentUser.Uid, user.UID) + assert.EqualValues(t, currentUser.Gid, user.GID) + + if runtime.GOOS != "windows" { + assert.EqualValues(t, strconv.Itoa(os.Geteuid()), user.EUID) + assert.EqualValues(t, strconv.Itoa(os.Getegid()), user.EGID) + } + } + + if v, ok := process.(types.Environment); ok { + actualEnv, err := v.Environment() + if err != nil { + t.Fatal(err) + } + output["process.env"] = actualEnv + + // Format the output to match format from os.Environ(). 
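+ // ("KEY=VALUE" entries) so the provider-reported environment can be compared directly with the sorted os.Environ() list.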
+ keyEqualsValueList := make([]string, 0, len(actualEnv)) + for k, v := range actualEnv { + keyEqualsValueList = append(keyEqualsValueList, k+"="+v) + } + sort.Strings(keyEqualsValueList) + + expectedEnv := os.Environ() + sort.Strings(expectedEnv) + + assert.Equal(t, expectedEnv, keyEqualsValueList) + } + + if memInfo, err := process.Memory(); !errors.Is(err, types.ErrNotImplemented) { + require.NoError(t, err) + if runtime.GOOS != "windows" { + // Virtual memory may be reported as + // zero on some versions of Windows. + assert.NotZero(t, memInfo.Virtual) + } + assert.NotZero(t, memInfo.Resident) + output["process.mem"] = memInfo + } + + for { + cpuTimes, err := process.CPUTime() + if errors.Is(err, types.ErrNotImplemented) { + break + } + + require.NoError(t, err) + if cpuTimes.Total() != 0 { + output["process.cpu"] = cpuTimes + break + } + // Spin until CPU times are non-zero. + // Some operating systems have a very + // low resolution on process CPU + // measurement. + } + + if v, ok := process.(types.OpenHandleEnumerator); ok { + fds, err := v.OpenHandles() + if assert.NoError(t, err) { + output["process.fd"] = fds + } + } + + if v, ok := process.(types.OpenHandleCounter); ok { + count, err := v.OpenHandleCount() + if assert.NoError(t, err) { + t.Log("open handles count:", count) + } + } + + if v, ok := process.(types.Seccomp); ok { + seccompInfo, err := v.Seccomp() + if assert.NoError(t, err) { + assert.NotZero(t, seccompInfo) + output["process.seccomp"] = seccompInfo + } + } + + if v, ok := process.(types.Capabilities); ok { + capInfo, err := v.Capabilities() + if assert.NoError(t, err) { + assert.NotZero(t, capInfo) + output["process.capabilities"] = capInfo + } + } + + logAsJSON(t, output) +} + +func TestHost(t *testing.T) { + host, err := Host() + if err == types.ErrNotImplemented { + t.Skip("host provider not implemented on", runtime.GOOS) + } else if err != nil && !strings.Contains(err.Error(), "FQDN") { + t.Fatal(err) + } + + info := host.Info() + assert.NotZero(t, info) + + output := map[string]interface{}{} + output["host.info"] = info + + if v, ok := host.(types.LoadAverage); ok { + loadAvg, err := v.LoadAverage() + if err != nil { + t.Fatal(err) + } + output["host.loadavg"] = loadAvg + } + + memory, err := host.Memory() + if err != nil { + t.Fatal(err) + } + output["host.memory"] = memory + + cpu, err := host.CPUTime() + if errors.Is(err, types.ErrNotImplemented) { + t.Log("CPU times not implemented") + return + } + + if err != nil { + t.Fatal(err) + } + output["host.cpu"] = cpu + + logAsJSON(t, output) +} + +func logAsJSON(t testing.TB, v interface{}) { + if !testing.Verbose() { + return + } + t.Helper() + j, _ := json.MarshalIndent(v, "", " ") + t.Log(string(j)) +} + +func TestProcesses(t *testing.T) { + start := time.Now() + procs, err := Processes() + t.Log("Processes() took", time.Since(start)) + if err != nil { + t.Fatal(err) + } + + t.Log("Found", len(procs), "processes.") + for _, proc := range procs { + info, err := proc.Info() + switch { + // Ignore processes that no longer exist or that cannot be accessed. 
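+ // These errors are expected when a process exits between enumeration and Info(), or when the test lacks privileges for system processes.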
+ case errors.Is(err, syscall.ESRCH), + errors.Is(err, syscall.EPERM), + errors.Is(err, syscall.EINVAL), + errors.Is(err, syscall.ENOENT), + errors.Is(err, fs.ErrPermission): + continue + case err != nil: + t.Fatalf("failed to get process info for PID=%d: %v", proc.PID(), err) + } + + t.Logf("pid=%v name='%s' exe='%s' args=%+v ppid=%d cwd='%s' start_time=%v", + info.PID, info.Name, info.Exe, info.Args, info.PPID, info.CWD, + info.StartTime) + } +} diff --git a/agent/pkg/go-sysinfo/types/errors.go b/agent/pkg/go-sysinfo/types/errors.go new file mode 100644 index 0000000..7e509bc --- /dev/null +++ b/agent/pkg/go-sysinfo/types/errors.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +import "errors" + +// ErrNotImplemented represents an error for a function that is not implemented on a particular platform. +var ErrNotImplemented = errors.New("unimplemented") diff --git a/agent/pkg/go-sysinfo/types/go.go b/agent/pkg/go-sysinfo/types/go.go new file mode 100644 index 0000000..6237744 --- /dev/null +++ b/agent/pkg/go-sysinfo/types/go.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +// GoInfo contains info about the go runtime +type GoInfo struct { + OS string `json:"os"` + Arch string `json:"arch"` + MaxProcs int `json:"max_procs"` + Version string `json:"version"` +} diff --git a/agent/pkg/go-sysinfo/types/host.go b/agent/pkg/go-sysinfo/types/host.go new file mode 100644 index 0000000..5685e98 --- /dev/null +++ b/agent/pkg/go-sysinfo/types/host.go @@ -0,0 +1,302 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +import "time" + +// Host is the interface that wraps methods for returning Host stats +// It may return partial information if the provider +// implementation is unable to collect all of the necessary data. +type Host interface { + CPUTimer + Info() HostInfo + Memory() (*HostMemoryInfo, error) + + // FQDN returns the fully-qualified domain name of the host, lowercased. + FQDN() (string, error) +} + +// NetworkCounters represents network stats from /proc/net +type NetworkCounters interface { + NetworkCounters() (*NetworkCountersInfo, error) +} + +// SNMP represents the data from /proc/net/snmp +// Note that according to RFC 2012,TCP.MaxConn, if present, is a signed value and should be cast to int64 +type SNMP struct { + IP map[string]uint64 `json:"ip" netstat:"Ip"` + ICMP map[string]uint64 `json:"icmp" netstat:"Icmp"` + ICMPMsg map[string]uint64 `json:"icmp_msg" netstat:"IcmpMsg"` + TCP map[string]uint64 `json:"tcp" netstat:"Tcp"` + UDP map[string]uint64 `json:"udp" netstat:"Udp"` + UDPLite map[string]uint64 `json:"udp_lite" netstat:"UdpLite"` +} + +// Netstat represents the data from /proc/net/netstat +type Netstat struct { + TCPExt map[string]uint64 `json:"tcp_ext" netstat:"TcpExt"` + IPExt map[string]uint64 `json:"ip_ext" netstat:"IpExt"` +} + +// NetworkCountersInfo represents available network counters from /proc/net +type NetworkCountersInfo struct { + SNMP SNMP `json:"snmp"` + Netstat Netstat `json:"netstat"` +} + +// VMStat is the interface wrapper for platforms that support /proc/vmstat. +type VMStat interface { + VMStat() (*VMStatInfo, error) +} + +// HostInfo contains basic host information. +type HostInfo struct { + Architecture string `json:"architecture"` // Hardware architecture (e.g. x86_64, arm, ppc, mips). + BootTime time.Time `json:"boot_time"` // Host boot time. + Containerized *bool `json:"containerized,omitempty"` // Is the process containerized. + Hostname string `json:"name"` // Hostname, lowercased. + IPs []string `json:"ip,omitempty"` // List of all IPs. + KernelVersion string `json:"kernel_version"` // Kernel version. + MACs []string `json:"mac"` // List of MAC addresses. + OS *OSInfo `json:"os"` // OS information. + Timezone string `json:"timezone"` // System timezone. + TimezoneOffsetSec int `json:"timezone_offset_sec"` // Timezone offset (seconds from UTC). + UniqueID string `json:"id,omitempty"` // Unique ID of the host (optional). +} + +// Uptime returns the system uptime +func (host HostInfo) Uptime() time.Duration { + return time.Since(host.BootTime) +} + +// OSInfo contains basic OS information +type OSInfo struct { + Type string `json:"type"` // OS Type (one of linux, macos, unix, windows). + Family string `json:"family"` // OS Family (e.g. redhat, debian, freebsd, windows). + Platform string `json:"platform"` // OS platform (e.g. centos, ubuntu, windows). + Name string `json:"name"` // OS Name (e.g. Mac OS X, CentOS). + Version string `json:"version"` // OS version (e.g. 10.12.6). + Major int `json:"major"` // Major release version. + Minor int `json:"minor"` // Minor release version. 
+ Patch int `json:"patch"` // Patch release version. + Build string `json:"build,omitempty"` // Build (e.g. 16G1114). + Codename string `json:"codename,omitempty"` // OS codename (e.g. jessie). +} + +// LoadAverage is the interface that wraps the LoadAverage method. +// LoadAverage returns load info on the host +type LoadAverage interface { + LoadAverage() (*LoadAverageInfo, error) +} + +// LoadAverageInfo contains load statistics +type LoadAverageInfo struct { + One float64 `json:"one_min"` + Five float64 `json:"five_min"` + Fifteen float64 `json:"fifteen_min"` +} + +// HostMemoryInfo (all values are specified in bytes). +type HostMemoryInfo struct { + Total uint64 `json:"total_bytes"` // Total physical memory. + Used uint64 `json:"used_bytes"` // Total - Free + Available uint64 `json:"available_bytes"` // Amount of memory available without swapping. + Free uint64 `json:"free_bytes"` // Amount of memory not used by the system. + VirtualTotal uint64 `json:"virtual_total_bytes"` // Total virtual memory. + VirtualUsed uint64 `json:"virtual_used_bytes"` // VirtualTotal - VirtualFree + VirtualFree uint64 `json:"virtual_free_bytes"` // Virtual memory that is not used. + Metrics map[string]uint64 `json:"raw,omitempty"` // Other memory related metrics. +} + +// VMStatInfo contains parsed info from /proc/vmstat. +// This procfs file has expanded much over the years +// with different kernel versions. If we don't have a field in vmstat, +// the field in the struct will just be blank. The comments represent kernel versions. +type VMStatInfo struct { + NrFreePages uint64 `json:"nr_free_pages"` // (since Linux 2.6.31) + NrAllocBatch uint64 `json:"nr_alloc_batch"` // (since Linux 3.12) + NrInactiveAnon uint64 `json:"nr_inactive_anon"` // (since Linux 2.6.28) + NrActiveAnon uint64 `json:"nr_active_anon"` // (since Linux 2.6.28) + NrInactiveFile uint64 `json:"nr_inactive_file"` // (since Linux 2.6.28) + NrActiveFile uint64 `json:"nr_active_file"` // (since Linux 2.6.28) + NrUnevictable uint64 `json:"nr_unevictable"` // (since Linux 2.6.28) + NrMlock uint64 `json:"nr_mlock"` // (since Linux 2.6.28) + NrAnonPages uint64 `json:"nr_anon_pages"` // (since Linux 2.6.18) + NrMapped uint64 `json:"nr_mapped"` // (since Linux 2.6.0) + NrFilePages uint64 `json:"nr_file_pages"` // (since Linux 2.6.18) + NrDirty uint64 `json:"nr_dirty"` // (since Linux 2.6.0) + NrWriteback uint64 `json:"nr_writeback"` // (since Linux 2.6.0) + NrSlabReclaimable uint64 `json:"nr_slab_reclaimable"` // (since Linux 2.6.19) + NrSlabUnreclaimable uint64 `json:"nr_slab_unreclaimable"` // (since Linux 2.6.19) + NrPageTablePages uint64 `json:"nr_page_table_pages"` // (since Linux 2.6.0) + NrKernelStack uint64 `json:"nr_kernel_stack"` // (since Linux 2.6.32) Amount of memory allocated to kernel stacks. + NrUnstable uint64 `json:"nr_unstable"` // (since Linux 2.6.0) + NrBounce uint64 `json:"nr_bounce"` // (since Linux 2.6.12) + NrVmscanWrite uint64 `json:"nr_vmscan_write"` // (since Linux 2.6.19) + NrVmscanImmediateReclaim uint64 `json:"nr_vmscan_immediate_reclaim"` // (since Linux 3.2) + NrWritebackTemp uint64 `json:"nr_writeback_temp"` // (since Linux 2.6.26) + NrIsolatedAnon uint64 `json:"nr_isolated_anon"` // (since Linux 2.6.32) + NrIsolatedFile uint64 `json:"nr_isolated_file"` // (since Linux 2.6.32) + NrShmem uint64 `json:"nr_shmem"` // (since Linux 2.6.32) Pages used by shmem and tmpfs(5). 
+ NrDirtied uint64 `json:"nr_dirtied"` // (since Linux 2.6.37) + NrWritten uint64 `json:"nr_written"` // (since Linux 2.6.37) + NrPagesScanned uint64 `json:"nr_pages_scanned"` // (since Linux 3.17) + NumaHit uint64 `json:"numa_hit"` // (since Linux 2.6.18) + NumaMiss uint64 `json:"numa_miss"` // (since Linux 2.6.18) + NumaForeign uint64 `json:"numa_foreign"` // (since Linux 2.6.18) + NumaInterleave uint64 `json:"numa_interleave"` // (since Linux 2.6.18) + NumaLocal uint64 `json:"numa_local"` // (since Linux 2.6.18) + NumaOther uint64 `json:"numa_other"` // (since Linux 2.6.18) + WorkingsetRefault uint64 `json:"workingset_refault"` // (since Linux 3.15) + WorkingsetActivate uint64 `json:"workingset_activate"` // (since Linux 3.15) + WorkingsetNodereclaim uint64 `json:"workingset_nodereclaim"` // (since Linux 3.15) + NrAnonTransparentHugepages uint64 `json:"nr_anon_transparent_hugepages"` // (since Linux 2.6.38) + NrFreeCma uint64 `json:"nr_free_cma"` // (since Linux 3.7) Number of free CMA (Contiguous Memory Allocator) pages. + NrDirtyThreshold uint64 `json:"nr_dirty_threshold"` // (since Linux 2.6.37) + NrDirtyBackgroundThreshold uint64 `json:"nr_dirty_background_threshold"` // (since Linux 2.6.37) + Pgpgin uint64 `json:"pgpgin"` // (since Linux 2.6.0) + Pgpgout uint64 `json:"pgpgout"` // (since Linux 2.6.0) + Pswpin uint64 `json:"pswpin"` // (since Linux 2.6.0) + Pswpout uint64 `json:"pswpout"` // (since Linux 2.6.0) + PgallocDma uint64 `json:"pgalloc_dma"` // (since Linux 2.6.5) + PgallocDma32 uint64 `json:"pgalloc_dma32"` // (since Linux 2.6.16) + PgallocNormal uint64 `json:"pgalloc_normal"` // (since Linux 2.6.5) + PgallocHigh uint64 `json:"pgalloc_high"` // (since Linux 2.6.5) + PgallocMovable uint64 `json:"pgalloc_movable"` // (since Linux 2.6.23) + Pgfree uint64 `json:"pgfree"` // (since Linux 2.6.0) + Pgactivate uint64 `json:"pgactivate"` // (since Linux 2.6.0) + Pgdeactivate uint64 `json:"pgdeactivate"` // (since Linux 2.6.0) + Pgfault uint64 `json:"pgfault"` // (since Linux 2.6.0) + Pgmajfault uint64 `json:"pgmajfault"` // (since Linux 2.6.0) + PgrefillDma uint64 `json:"pgrefill_dma"` // (since Linux 2.6.5) + PgrefillDma32 uint64 `json:"pgrefill_dma32"` // (since Linux 2.6.16) + PgrefillNormal uint64 `json:"pgrefill_normal"` // (since Linux 2.6.5) + PgrefillHigh uint64 `json:"pgrefill_high"` // (since Linux 2.6.5) + PgrefillMovable uint64 `json:"pgrefill_movable"` // (since Linux 2.6.23) + PgstealKswapdDma uint64 `json:"pgsteal_kswapd_dma"` // (since Linux 3.4) + PgstealKswapdDma32 uint64 `json:"pgsteal_kswapd_dma32"` // (since Linux 3.4) + PgstealKswapdNormal uint64 `json:"pgsteal_kswapd_normal"` // (since Linux 3.4) + PgstealKswapdHigh uint64 `json:"pgsteal_kswapd_high"` // (since Linux 3.4) + PgstealKswapdMovable uint64 `json:"pgsteal_kswapd_movable"` // (since Linux 3.4) + PgstealDirectDma uint64 `json:"pgsteal_direct_dma"` + PgstealDirectDma32 uint64 `json:"pgsteal_direct_dma32"` // (since Linux 3.4) + PgstealDirectNormal uint64 `json:"pgsteal_direct_normal"` // (since Linux 3.4) + PgstealDirectHigh uint64 `json:"pgsteal_direct_high"` // (since Linux 3.4) + PgstealDirectMovable uint64 `json:"pgsteal_direct_movable"` // (since Linux 2.6.23) + PgscanKswapdDma uint64 `json:"pgscan_kswapd_dma"` + PgscanKswapdDma32 uint64 `json:"pgscan_kswapd_dma32"` // (since Linux 2.6.16) + PgscanKswapdNormal uint64 `json:"pgscan_kswapd_normal"` // (since Linux 2.6.5) + PgscanKswapdHigh uint64 `json:"pgscan_kswapd_high"` + PgscanKswapdMovable uint64 `json:"pgscan_kswapd_movable"` // (since 
Linux 2.6.23) + PgscanDirectDma uint64 `json:"pgscan_direct_dma"` // + PgscanDirectDma32 uint64 `json:"pgscan_direct_dma32"` // (since Linux 2.6.16) + PgscanDirectNormal uint64 `json:"pgscan_direct_normal"` + PgscanDirectHigh uint64 `json:"pgscan_direct_high"` + PgscanDirectMovable uint64 `json:"pgscan_direct_movable"` // (since Linux 2.6.23) + PgscanDirectThrottle uint64 `json:"pgscan_direct_throttle"` // (since Linux 3.6) + ZoneReclaimFailed uint64 `json:"zone_reclaim_failed"` // (since linux 2.6.31) + Pginodesteal uint64 `json:"pginodesteal"` // (since linux 2.6.0) + SlabsScanned uint64 `json:"slabs_scanned"` // (since linux 2.6.5) + KswapdInodesteal uint64 `json:"kswapd_inodesteal"` // (since linux 2.6.0) + KswapdLowWmarkHitQuickly uint64 `json:"kswapd_low_wmark_hit_quickly"` // (since 2.6.33) + KswapdHighWmarkHitQuickly uint64 `json:"kswapd_high_wmark_hit_quickly"` // (since 2.6.33) + Pageoutrun uint64 `json:"pageoutrun"` // (since Linux 2.6.0) + Allocstall uint64 `json:"allocstall"` // (since Linux 2.6.0) + Pgrotated uint64 `json:"pgrotated"` // (since Linux 2.6.0) + DropPagecache uint64 `json:"drop_pagecache"` // (since Linux 3.15) + DropSlab uint64 `json:"drop_slab"` // (since Linux 3.15) + NumaPteUpdates uint64 `json:"numa_pte_updates"` // (since Linux 3.8) + NumaHugePteUpdates uint64 `json:"numa_huge_pte_updates"` // (since Linux 3.13) + NumaHintFaults uint64 `json:"numa_hint_faults"` // (since Linux 3.8) + NumaHintFaultsLocal uint64 `json:"numa_hint_faults_local"` // (since Linux 3.8) + NumaPagesMigrated uint64 `json:"numa_pages_migrated"` // (since Linux 3.8) + PgmigrateSuccess uint64 `json:"pgmigrate_success"` // (since Linux 3.8) + PgmigrateFail uint64 `json:"pgmigrate_fail"` // (since Linux 3.8) + CompactMigrateScanned uint64 `json:"compact_migrate_scanned"` // (since Linux 3.8) + CompactFreeScanned uint64 `json:"compact_free_scanned"` // (since Linux 3.8) + CompactIsolated uint64 `json:"compact_isolated"` // (since Linux 3.8) + CompactStall uint64 `json:"compact_stall"` // (since Linux 2.6.35) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + CompactFail uint64 `json:"compact_fail"` // (since Linux 2.6.35) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + CompactSuccess uint64 `json:"compact_success"` // (since Linux 2.6.35) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + HtlbBuddyAllocSuccess uint64 `json:"htlb_buddy_alloc_success"` // (since Linux 2.6.26) + HtlbBuddyAllocFail uint64 `json:"htlb_buddy_alloc_fail"` // (since Linux 2.6.26) + UnevictablePgsCulled uint64 `json:"unevictable_pgs_culled"` // (since Linux 2.6.28) + UnevictablePgsScanned uint64 `json:"unevictable_pgs_scanned"` // (since Linux 2.6.28) + UnevictablePgsRescued uint64 `json:"unevictable_pgs_rescued"` // (since Linux 2.6.28) + UnevictablePgsMlocked uint64 `json:"unevictable_pgs_mlocked"` // (since Linux 2.6.28) + UnevictablePgsMunlocked uint64 `json:"unevictable_pgs_munlocked"` // (since Linux 2.6.28) + UnevictablePgsCleared uint64 `json:"unevictable_pgs_cleared"` // (since Linux 2.6.28) + UnevictablePgsStranded uint64 `json:"unevictable_pgs_stranded"` // (since Linux 2.6.28) + ThpFaultAlloc uint64 `json:"thp_fault_alloc"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpFaultFallback uint64 `json:"thp_fault_fallback"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. 
+ ThpCollapseAlloc uint64 `json:"thp_collapse_alloc"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpCollapseAllocFailed uint64 `json:"thp_collapse_alloc_failed"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpSplit uint64 `json:"thp_split"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpZeroPageAlloc uint64 `json:"thp_zero_page_alloc"` // (since Linux 3.8) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpZeroPageAllocFailed uint64 `json:"thp_zero_page_alloc_failed"` // (since Linux 3.8) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + BalloonInflate uint64 `json:"balloon_inflate"` // (since Linux 3.18) + BalloonDeflate uint64 `json:"balloon_deflate"` // (since Linux 3.18) + BalloonMigrate uint64 `json:"balloon_migrate"` // (since Linux 3.18) + NrTlbRemoteFlush uint64 `json:"nr_tlb_remote_flush"` // (since Linux 3.12) + NrTlbRemoteFlushReceived uint64 `json:"nr_tlb_remote_flush_received"` // (since Linux 3.12) + NrTlbLocalFlushAll uint64 `json:"nr_tlb_local_flush_all"` // (since Linux 3.12) + NrTlbLocalFlushOne uint64 `json:"nr_tlb_local_flush_one"` // (since Linux 3.12) + VmacacheFindCalls uint64 `json:"vmacache_find_calls"` // (since Linux 3.16) + VmacacheFindHits uint64 `json:"vmacache_find_hits"` // (since Linux 3.16) + VmacacheFullFlushes uint64 `json:"vmacache_full_flushes"` // (since Linux 3.19) + // the following fields are not documented in `man 5 proc` as of 4.15 + NrZoneInactiveAnon uint64 `json:"nr_zone_inactive_anon"` + NrZoneActiveAnon uint64 `json:"nr_zone_active_anon"` + NrZoneInactiveFile uint64 `json:"nr_zone_inactive_file"` + NrZoneActiveFile uint64 `json:"nr_zone_active_file"` + NrZoneUnevictable uint64 `json:"nr_zone_unevictable"` + NrZoneWritePending uint64 `json:"nr_zone_write_pending"` + NrZspages uint64 `json:"nr_zspages"` + NrShmemHugepages uint64 `json:"nr_shmem_hugepages"` + NrShmemPmdmapped uint64 `json:"nr_shmem_pmdmapped"` + AllocstallDma uint64 `json:"allocstall_dma"` + AllocstallDma32 uint64 `json:"allocstall_dma32"` + AllocstallNormal uint64 `json:"allocstall_normal"` + AllocstallMovable uint64 `json:"allocstall_movable"` + PgskipDma uint64 `json:"pgskip_dma"` + PgskipDma32 uint64 `json:"pgskip_dma32"` + PgskipNormal uint64 `json:"pgskip_normal"` + PgskipMovable uint64 `json:"pgskip_movable"` + Pglazyfree uint64 `json:"pglazyfree"` + Pglazyfreed uint64 `json:"pglazyfreed"` + Pgrefill uint64 `json:"pgrefill"` + PgstealKswapd uint64 `json:"pgsteal_kswapd"` + PgstealDirect uint64 `json:"pgsteal_direct"` + PgscanKswapd uint64 `json:"pgscan_kswapd"` + PgscanDirect uint64 `json:"pgscan_direct"` + OomKill uint64 `json:"oom_kill"` + CompactDaemonWake uint64 `json:"compact_daemon_wake"` + CompactDaemonMigrateScanned uint64 `json:"compact_daemon_migrate_scanned"` + CompactDaemonFreeScanned uint64 `json:"compact_daemon_free_scanned"` + ThpFileAlloc uint64 `json:"thp_file_alloc"` + ThpFileMapped uint64 `json:"thp_file_mapped"` + ThpSplitPage uint64 `json:"thp_split_page"` + ThpSplitPageFailed uint64 `json:"thp_split_page_failed"` + ThpDeferredSplitPage uint64 `json:"thp_deferred_split_page"` + ThpSplitPmd uint64 `json:"thp_split_pmd"` + ThpSplitPud uint64 `json:"thp_split_pud"` + ThpSwpout uint64 `json:"thp_swpout"` + ThpSwpoutFallback uint64 `json:"thp_swpout_fallback"` + SwapRa uint64 `json:"swap_ra"` + SwapRaHit uint64 `json:"swap_ra_hit"` +} diff --git 
a/agent/pkg/go-sysinfo/types/process.go b/agent/pkg/go-sysinfo/types/process.go new file mode 100644 index 0000000..c02ac9d --- /dev/null +++ b/agent/pkg/go-sysinfo/types/process.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package types + +import "time" + +// Process is the main wrapper for gathering information on a process +type Process interface { + CPUTimer + // Info returns process info. + // It may return partial information if the provider + // implementation is unable to collect all the necessary data. + Info() (ProcessInfo, error) + Memory() (MemoryInfo, error) + User() (UserInfo, error) + Parent() (Process, error) + PID() int +} + +// ProcessInfo contains basic stats about a process +type ProcessInfo struct { + Name string `json:"name"` + PID int `json:"pid"` + PPID int `json:"ppid"` + CWD string `json:"cwd"` + Exe string `json:"exe"` + Args []string `json:"args"` + StartTime time.Time `json:"start_time"` +} + +// UserInfo contains information about the UID and GID +// values of a process. +type UserInfo struct { + // UID is the user ID. + // On Linux and Darwin (macOS) this is the real user ID. + // On Windows, this is the security identifier (SID) of the + // user account of the process access token. + UID string `json:"uid"` + + // On Linux and Darwin (macOS) this is the effective user ID. + // On Windows, this is empty. + EUID string `json:"euid"` + + // On Linux and Darwin (macOS) this is the saved user ID. + // On Windows, this is empty. + SUID string `json:"suid"` + + // GID is the primary group ID. + // On Linux and Darwin (macOS) this is the real group ID. + // On Windows, this is the security identifier (SID) of the + // primary group of the process access token. + GID string `json:"gid"` + + // On Linux and Darwin (macOS) this is the effective group ID. + // On Windows, this is empty. + EGID string `json:"egid"` + + // On Linux and Darwin (macOS) this is the saved group ID. + // On Windows, this is empty. + SGID string `json:"sgid"` +} + +// Environment is the interface that wraps the Environment method. +// Environment returns variables for a process +type Environment interface { + Environment() (map[string]string, error) +} + +// OpenHandleEnumerator is the interface that wraps the OpenHandles method. +// OpenHandles lists the open file handles. +type OpenHandleEnumerator interface { + OpenHandles() ([]string, error) +} + +// OpenHandleCounter is the interface that wraps the OpenHandleCount method. +// OpenHandleCount returns the number of open file handles. +type OpenHandleCounter interface { + OpenHandleCount() (int, error) +} + +// CPUTimer is the interface that wraps the CPUTime method. 
+// CPUTime returns CPU time info +type CPUTimer interface { + // CPUTime returns a CPUTimes structure for + // the host or some process. + // + // The User and System fields are guaranteed + // to be populated for all platforms, and + // for both hosts and processes. + // This may return types.ErrNotImplemented + // if the provider cannot implement collection of this data. + CPUTime() (CPUTimes, error) +} + +// CPUTimes contains CPU timing stats for a process +type CPUTimes struct { + User time.Duration `json:"user"` + System time.Duration `json:"system"` + Idle time.Duration `json:"idle,omitempty"` + IOWait time.Duration `json:"iowait,omitempty"` + IRQ time.Duration `json:"irq,omitempty"` + Nice time.Duration `json:"nice,omitempty"` + SoftIRQ time.Duration `json:"soft_irq,omitempty"` + Steal time.Duration `json:"steal,omitempty"` +} + +// Total returns the total CPU time +func (cpu CPUTimes) Total() time.Duration { + return cpu.User + cpu.System + cpu.Idle + cpu.IOWait + cpu.IRQ + cpu.Nice + + cpu.SoftIRQ + cpu.Steal +} + +// MemoryInfo contains memory stats for a process +type MemoryInfo struct { + Resident uint64 `json:"resident_bytes"` + Virtual uint64 `json:"virtual_bytes"` + Metrics map[string]uint64 `json:"raw,omitempty"` // Other memory related metrics. +} + +// SeccompInfo contains seccomp info for a process +type SeccompInfo struct { + Mode string `json:"mode"` + NoNewPrivs *bool `json:"no_new_privs,omitempty"` // Added in kernel 4.10. +} + +// CapabilityInfo contains capability set info. +type CapabilityInfo struct { + Inheritable []string `json:"inheritable"` + Permitted []string `json:"permitted"` + Effective []string `json:"effective"` + Bounding []string `json:"bounding"` + Ambient []string `json:"ambient"` +} + +// Capabilities is the interface that wraps the Capabilities method. +// Capabilities returns capabilities for a process +type Capabilities interface { + Capabilities() (*CapabilityInfo, error) +} + +// Seccomp is the interface that wraps the Seccomp method. 
+// Seccomp returns seccomp info on Linux +type Seccomp interface { + Seccomp() (*SeccompInfo, error) +} diff --git a/agent/pkg/log/logger.go b/agent/pkg/log/logger.go new file mode 100644 index 0000000..3d51d3c --- /dev/null +++ b/agent/pkg/log/logger.go @@ -0,0 +1,134 @@ +package log + +import ( + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + rotatelogs "github.com/lestrrat-go/file-rotatelogs" + "github.com/natefinch/lumberjack" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "io" + "path/filepath" + "sync" + "time" +) + +var Logger *zap.Logger +var once sync.Once + +func InitLogger() { + once.Do(func() { Logger = createZapLog() }) +} + +// initZapLog 初始化 zap 日志 +func createZapLog() *zap.Logger { + // 开启 debug + if config.Config.Debug == true { + if Logger, err := zap.NewDevelopment(); err == nil { + return Logger + } else { + panic("创建zap日志包失败,详情:" + err.Error()) + } + } + + encoderConfig := zap.NewProductionEncoderConfig() + encoderConfig.EncodeTime = func(t time.Time, enc zapcore.PrimitiveArrayEncoder) { + enc.AppendString(t.Format("2006-01-02 15:04:05.000")) + } + + // 在日志文件中使用大写字母记录日志级别 + encoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder + encoder := zapcore.NewConsoleEncoder(encoderConfig) + filename := filepath.Join(config.Config.StaticBasePath, "/logs/", config.Config.Logger.Filename) + var writer zapcore.WriteSyncer + if config.Config.Logger.DefaultDivision == "size" { + // 按文件大小切割日志 + writer = zapcore.AddSync(getLumberJackWriter(filename)) + } else { + // 按天切割日志 + writer = zapcore.AddSync(getRotateWriter(filename)) + } + + zapCore := zapcore.NewCore(encoder, writer, zap.InfoLevel) + //zap.AddStacktrace(zap.WarnLevel) + return zap.New(zapCore, zap.AddCaller()) +} + +// getRotateWriter 按日期切割日志 +func getRotateWriter(filename string) io.Writer { + maxAge := time.Duration(config.Config.Logger.DivisionTime.MaxAge) + rotationTime := time.Duration(config.Config.Logger.DivisionTime.RotationTime) + hook, err := rotatelogs.New( + filename+".%Y%m%d", + rotatelogs.WithLinkName(filename), + rotatelogs.WithMaxAge(time.Hour*24*maxAge), + rotatelogs.WithRotationTime(time.Hour*rotationTime), // 默认一天 + ) + + if err != nil { + panic(err) + } + + return hook +} + +// getLumberJackWriter 按文件切割日志 +func getLumberJackWriter(filename string) io.Writer { + // 日志切割配置 + return &lumberjack.Logger{ + Filename: filename, // 日志文件的位置 + MaxSize: config.Config.Logger.DivisionSize.MaxSize, // 在进行切割之前,日志文件的最大大小(以MB为单位) + MaxBackups: config.Config.Logger.DivisionSize.MaxBackups, // 保留旧文件的最大个数 + MaxAge: config.Config.Logger.DivisionSize.MaxAge, // 保留旧文件的最大天数 + Compress: config.Config.Logger.DivisionSize.Compress, // 是否压缩/归档旧文件 + } +} + +func Info(args ...interface{}) { + Logger.Info(fmt.Sprintf("%s", args...)) +} + +func Infof(format string, args ...interface{}) { + Logger.Info(fmt.Sprintf(format, args...)) +} + +func Warning(args ...interface{}) { + Logger.Warn(fmt.Sprintf("%s", args...)) +} + +func Warningf(format string, args ...interface{}) { + Logger.Warn(fmt.Sprintf(format, args...)) +} + +func Error(args ...interface{}) { + Logger.Error(fmt.Sprintf("%s", args...)) +} + +func Errorf(format string, args ...interface{}) { + Logger.Error(fmt.Sprintf(format, args...)) +} + +func Debug(args ...interface{}) { + Logger.Debug(fmt.Sprintf("%s", args...)) + +} + +func Debugf(format string, args ...interface{}) { + Logger.Debug(fmt.Sprintf(format, args...)) +} + +func Trace(args ...interface{}) { + Logger.Info(fmt.Sprintf("%s", args...)) +} + +func Tracef(format string, args ...interface{}) { + 
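+ // zap has no trace level, so Trace and Tracef are forwarded to the info level.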
Logger.Info(fmt.Sprintf(format, args...)) +} + +//func Fatal(args ...interface{}) { +// Logger.Fatal(fmt.Sprintf("%s", args...)) +//} +// +//func Fatalf(format string, args ...interface{}) { +// Logger.Fatal(fmt.Sprintf(format, args...)) +//} diff --git a/agent/pkg/nats-client/nats.go b/agent/pkg/nats-client/nats.go new file mode 100644 index 0000000..9c5e5f9 --- /dev/null +++ b/agent/pkg/nats-client/nats.go @@ -0,0 +1,61 @@ +package nats_client + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + "github.com/nats-io/nats.go" + "time" +) + +func Publish(subj string, data []byte) error { + // Connect Options. + opts := []nats.Option{nats.Name("NATS Sample Publisher")} + opts = append(opts, nats.UserInfo(NatsConfig.User, NatsConfig.Password)) + + nc, err := nats.Connect(NatsConfig.Url, opts...) + if err != nil { + log.Errorf("连接nats失败:%s", err) + return err + } + defer nc.Close() + nc.Publish(subj, data) + nc.Flush() + + if err := nc.LastError(); err != nil { + //log.Fatal(err) + log.Errorf("与nats通信失败:%s", err) + return err + } + + return nil +} + +func Request(subj string, data []byte) (*nats.Msg, error) { + // Connect Options. + opts := []nats.Option{nats.Name("NATS Sample Publisher")} + opts = append(opts, nats.UserInfo(NatsConfig.User, NatsConfig.Password)) + + nc, err := nats.Connect(NatsConfig.Url, opts...) + if err != nil { + log.Errorf("连接nats失败:%s", err) + return nil, err + } + defer nc.Close() + msg, err := nc.Request(subj, data, 5*time.Second) + if err != nil { + if nc.LastError() != nil { + log.Errorf("%v for request", nc.LastError()) + } + log.Errorf("%v for request", err) + return nil, err + } + + nc.Flush() + + if err := nc.LastError(); err != nil { + //log.Fatal(err) + log.Errorf("与nats通信失败:%s", err) + return nil, err + } + + return msg, nil +} diff --git a/agent/pkg/nats-client/nats_test.go b/agent/pkg/nats-client/nats_test.go new file mode 100644 index 0000000..1ee01e1 --- /dev/null +++ b/agent/pkg/nats-client/nats_test.go @@ -0,0 +1,42 @@ +package nats_client + +import ( + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/model/nats_msg_model" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/internal/nats_service/definition" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + "github.com/nats-io/nats.go" + "testing" + "time" +) + +func TestPublish(t *testing.T) { + Publish("hello", []byte("萨达咖啡机阿拉山口戴假发立卡就撒到啦开发机")) +} + +func TestSubscribe(t *testing.T) { + // Connect Options. + opts := []nats.Option{nats.Name("NATS Sample Publisher")} + opts = append(opts, nats.UserInfo(NatsConfig.User, NatsConfig.Password)) + + nc, _ := nats.Connect(NatsConfig.Url, opts...) 
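+ // The connection error is ignored in this test; each received message is decoded below via nats_msg_model.UnmarshalMsgModel.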
+ + nc.Subscribe(definition.ToScheduleSubject, func(msg *nats.Msg) { + fmt.Println("接收到数据:" + string(msg.Data)) + model, err := nats_msg_model.UnmarshalMsgModel(msg.Data) + if err != nil { + fmt.Println("反序列化失败:" + err.Error()) + } + + fmt.Println(model.Func) + + }) + + nc.Flush() + if err := nc.LastError(); err != nil { + log.Error(err) + } + + time.Sleep(time.Second * 500) + +} diff --git a/agent/pkg/nats-client/variable.go b/agent/pkg/nats-client/variable.go new file mode 100644 index 0000000..9db80fe --- /dev/null +++ b/agent/pkg/nats-client/variable.go @@ -0,0 +1,9 @@ +package nats_client + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" +) + +var ( + NatsConfig = config.Config.Nats +) diff --git a/agent/pkg/process-manager/README.md b/agent/pkg/process-manager/README.md new file mode 100644 index 0000000..3d3af06 --- /dev/null +++ b/agent/pkg/process-manager/README.md @@ -0,0 +1,391 @@ +# minit + +![MIT License](https://img.shields.io/github/license/guoyk93/minit) +[![release](https://github.com/guoyk93/minit/actions/workflows/release.yml/badge.svg)](https://github.com/guoyk93/minit/actions/workflows/release.yml) +[![Dockerhub](https://img.shields.io/docker/pulls/guoyk/minit)](https://hub.docker.com/r/guoyk/minit) +[![Patreon Donation](https://img.shields.io/badge/Patreon-Donation-orange)](https://www.patreon.com/guoyk) +[![GitHub Sponsors](https://img.shields.io/github/sponsors/guoyk93)](https://github.com/sponsors/guoyk93) + +The missing `init` daemon for container + +[简体中文](README.zh.md) + +## 1. Installation + +You can install `minit` to your own container image by a multi-stage `Dockerfile` + +```dockerfile +FROM guoyk/minit:VERSION AS minit +# Or using Github Packages +# FROM ghcr.io/guoyk93/minit:VERSION AS minit + +# Your own build stage +FROM ubuntu:22.04 + +# ... + +# Copy minit binary +COPY --from=minit /minit /minit + +# Set ENTRYPOINT to minit +ENTRYPOINT ["/minit"] + +# Add a unit file to /etc/minit.d +ADD my-service.yml /etc/minit.d/my-service.yml +``` + +## 2. Unit Loading + +### 2.1 From Files + +Add Unit `YAML` files to `/etc/minit.d` + +Override default directory by environment variable `MINIT_UNIT_DIR` + +Use `---` to separate multiple units in single `YAML` file + +### 2.2 From Environment Variable + +**Example:** + +```dockerfile +ENV MINIT_MAIN="redis-server /etc/redis.conf" +ENV MINIT_MAIN_DIR="/work" +ENV MINIT_MAIN_NAME="main-program" +ENV MINIT_MAIN_GROUP="super-main" +ENV MINIT_MAIN_KIND="cron" +ENV MINIT_MAIN_IMMEDIATE=true +ENV MINIT_MAIN_CRON="* * * * *" +ENV MINIT_MAIN_CHARSET=gbk18030 +``` + +### 2.3 From Command Arguments + +**Example:** + +```dockerfile +ENTRYPOINT ["/minit"] +CMD ["redis-server", "/etc/redis.conf"] +``` + + +## 3. Unit Types + +### 3.1 Type: `render` + +`render` units execute at the very first stage. It renders template files. + +See [pkg/mtmpl/funcs.go](pkg/mtmpl/funcs.go) for available functions. + +**Example:** + +* `/etc/minit.d/render-demo.yaml` + +```yaml +kind: render +name: render-demo +files: + - /opt/*.txt +``` + +* `/opt/demo.txt` + +```text +Hello, {{stringsToUpper .Evn.HOME}} +``` + +Upon startup, `minit` will render file `/opt/demo.txt` + +Since default user for container is `root`, the content of file `/opt/demo.txt` will become: + +```text +Hello, ROOT +``` + +### 3.2 Type: `once` + +`once` units execute after `render` units. It runs command once. 
+ +**Example:** + +```yaml +kind: once +name: once-demo +command: + - echo + - once +``` + +### 3.3 Type: `daemon` + +`daemon` units execute after `render` and `once`. It runs long-running command. + +**Example:** + +```yaml +kind: daemon +name: daemon-demo +command: + - sleep + - 9999 +``` + +### 3.4 Type: `cron` + +`cron` units execute after `render` and `once`. It runs command at cron basis. + +**Example:** + +```yaml +kind: cron +name: cron-demo +cron: "* * * * *" # cron expression, support extended syntax by https://github.com/robfig/cron +immediate: true # execute once on started +command: + - echo + - cron +``` + +## 4. Unit Features + +### 4.1 Replicas + +If `count` field is set, `minit` will replicate this unit with sequence number suffixed + +**Example:** + +```yaml +kind: once +name: once-demo-replicas +count: 2 +command: + - echo + - $MINIT_UNIT_SUB_ID +``` + +Is equal to: + +```yaml +kind: once +name: once-demo-replicas-1 +command: + - echo + - 1 +--- +kind: once +name: once-demo-replicas-2 +command: + - echo + - 2 +``` + +### 4.2 Logging + +**Log Files** + +`minit` write console logs of every command unit into `/var/log/minit` + +This directory can be overridden by environment `MINIT_LOG_DIR` + +Set `MINIT_LOG_DIR=none` to disable file logging and optimize performance of `minit` + +**Console Encoding** + +If `charset` field is set, `minit` will transcode command console output from other encodings to `utf8` + +**Example:** + +```yaml +kind: once +name: once-demo-transcode +charset: gbk # supports gbk, gb18030 only +command: + - command-that-produces-gbk-logs +``` + +### 4.3 Extra Environment Variables + +If `env` field is set, `minit` will append extra environment variables while launching command. + +**Example:** + +```yaml +kind: daemon +name: daemon-demo-env +env: + AAA: BBB +command: + - echo + - $AAA +``` + +### 4.4 Render Environment Variables + +Any environment with prefix `MINIT_ENV_` will be rendered before passing to command. + +**Example:** + +```yaml +kind: daemon +name: daemon-demo-render-env +env: + MINIT_ENV_MY_IP: '{{netResolveIP "google.com"}}' +command: + - echo + - $MY_IP +``` + +### 4.5 Using `shell` in command units + +By default, `command` field will be passed to `exec` syscall, `minit` won't modify ti, except simple environment variable substitution. + +If `shell` field is set, `command` field will act as a simple script file. + +**Example:** + +```yaml +kind: once +name: once-demo-shell +shell: "/bin/bash -eu" +command: # this is merely a script file + - if [ -n "${HELLO}" ]; then + - echo "world" + - fi +``` + +### 4.6 Unit Enabling / Disabling + +**Grouping** + +Use `group` field to set a group name to units. + +Default unit group name is `default` + +**Allowlist Mode** + +If environment `MINIT_ENABLE` is set, `minit` will run in **Allowlist Mode**, only units with name existed +in `MINIT_ENABLE` will be loaded. + +Use format `@group-name` to enable a group of units + +Use format `&daemon` to enable a kind of units + +Example: + +```text +MINIT_ENABLE=once-demo,@demo +``` + +**Denylist Mode** + +If environment `MINIT_DISABLE` is set, `minit` will run in **Denylist Mode**, units with name existed in `MINIT_DISABLE` +will NOT be loaded. + +Use format `@group-name` to disable a group of units + +Example: + +```text +MINIT_DISABLE=once-demo,@demo +``` + +## 5. 
Extra Features
+
+### 5.1 Zombie Processes Cleaning
+
+When running as `PID 1`, `minit` will reap zombie processes.
+
+This is the responsibility of `PID 1`.
+
+### 5.2 Quick Exit
+
+By default, `minit` will keep running even without `daemon` or `cron` units defined.
+
+If you want to use `minit` in `initContainers` or outside of a container, you can set the environment
+variable `MINIT_QUICK_EXIT=true` to let `minit` exit as soon as possible.
+
+### 5.3 Resource limits (ulimit)
+
+**Warning: this feature needs the container running in Privileged mode**
+
+Use environment variable `MINIT_RLIMIT_XXX` to set resource limits
+
+* `unlimited` means no limitation
+* `-` means unchanged
+
+**Supported:**
+
+```text
+MINIT_RLIMIT_AS
+MINIT_RLIMIT_CORE
+MINIT_RLIMIT_CPU
+MINIT_RLIMIT_DATA
+MINIT_RLIMIT_FSIZE
+MINIT_RLIMIT_LOCKS
+MINIT_RLIMIT_MEMLOCK
+MINIT_RLIMIT_MSGQUEUE
+MINIT_RLIMIT_NICE
+MINIT_RLIMIT_NOFILE
+MINIT_RLIMIT_NPROC
+MINIT_RLIMIT_RTPRIO
+MINIT_RLIMIT_SIGPENDING
+MINIT_RLIMIT_STACK
+```
+
+**Example:**
+
+```text
+MINIT_RLIMIT_NOFILE=unlimited # set soft limit and hard limit to 'unlimited'
+MINIT_RLIMIT_NOFILE=128:unlimited # set soft limit to 128, set hard limit to 'unlimited'
+MINIT_RLIMIT_NOFILE=128:- # set soft limit to 128, don't change hard limit
+MINIT_RLIMIT_NOFILE=-:unlimited # don't change soft limit, set hard limit to 'unlimited'
+```
+
+### 5.4 Kernel Parameters (sysctl)
+
+**Warning: this feature needs the container running in Privileged mode**
+
+Use environment variable `MINIT_SYSCTL` to set kernel parameters
+
+Separate multiple entries with `,`
+
+**Example:**
+
+```
+MINIT_SYSCTL=vm.max_map_count=262144,vm.swappiness=60
+```
+
+### 5.5 Transparent Huge Page (THP)
+
+**Warning: this feature needs the container running in Privileged mode and the host `/sys` mounted**
+
+Use environment variable `MINIT_THP` to set the THP configuration.
+
+**Example:**
+
+```
+# available values: never, madvise, always
+MINIT_THP=madvise
+```
+
+### 5.6 Built-in WebDAV server
+
+By setting environment variable `MINIT_WEBDAV_ROOT`, `minit` will start a built-in WebDAV server at port `7486`
+
+Environment Variables:
+
+* `MINIT_WEBDAV_ROOT`, path to serve, `/srv` for example
+* `MINIT_WEBDAV_PORT`, port of the WebDAV server, defaults to `7486`
+* `MINIT_WEBDAV_USERNAME` and `MINIT_WEBDAV_PASSWORD`, optional basic auth for the WebDAV server
+
+### 5.7 Banner file
+
+By putting a file at `/etc/banner.minit.txt`, `minit` will print its content at startup
+
+## 6. Donation
+
+View https://guoyk.xyz/donation
+
+## 7.
Credits + +GUO YANKE, MIT License diff --git a/agent/pkg/process-manager/README.zh.md b/agent/pkg/process-manager/README.zh.md new file mode 100644 index 0000000..55ed454 --- /dev/null +++ b/agent/pkg/process-manager/README.zh.md @@ -0,0 +1,298 @@ +# minit + +一个用 Go 编写的进程管理工具,用以在容器内启动多个进程 + +## 获取镜像 + +``` +guoyk/minit:VERSION +``` + +## 使用方法 + +使用多阶段 Dockerfile 来从上述镜像地址导入 `minit` 可执行程序 + +```dockerfile +FROM guoyk/minit AS minit + +FROM xxxxxxx + +# 添加一份服务配置到 /etc/minit.d/ +ADD my-service.yml /etc/minit.d/my-service.yml +# 这将从 minit 镜像中,将可执行文件 /minit 拷贝到最终镜像的 /minit 位置 +COPY --from=minit /minit /minit +# 这将指定 /minit 作为主启动入口,允许后续的 CMD 传入 +ENTRYPOINT ["/minit"] +``` + +## 配置文件 + +配置文件默认从 `/etc/minit.d/*.yml` 读取 + +允许使用 `---` 分割在单个 `yaml` 文件中,写入多条配置单元 + +当前支持以下类型 + +* `render` + + `render` 类型配置单元最先运行(优先级 L1),一般用于渲染配置文件,可使用函数参考 [pkg/mtmpl/funcs.go] 文件 + + 如下示例 + + `/etc/minit.d/render-test.yml` + + ```yaml + kind: render + name: render-test + files: + - /tmp/*.txt + ``` + + `/tmp/sample.txt` + + ```text + Hello, {{stringsToUpper .Env.HOME}} + ``` + + `minit` 启动时,会按照配置规则,渲染 `/tmp/sample.txt` 文件 + + 由于容器用户默认为 `root`,因此 `/tmp/sample.txt` 文件会被渲染为 + + ```text + Hello, /ROOT + ``` + + 可用渲染函数,参见代码中的 `pkg/tmplfuncs/tmplfuncs.go` + +* `once` + + `once` 类型的配置单元随后运行(优先级 L2),用于执行一次性进程 + + `/etc/minit.d/sample.yml` + + ```yaml + kind: once + name: once-sample + dir: /work # 指定工作目录 + command: + - echo + - once + ``` + +* `daemon` + + `daemon` 类型的配置单元,最后启动(优先级 L3),用于执行常驻进程 + + ```yaml + kind: daemon + name: daemon-sample + dir: /work # 指定工作目录 + count: 3 # 如果指定了 count,会启动多个副本 + command: + - sleep + - 9999 + ``` + +* `cron` + + `cron` 类型的配置单元,最后启动(优先级 L3),用于按照 cron 表达式,执行命令 + + ```yaml + kind: cron + name: cron-sample + cron: "* * * * *" + immediate: true # 启动后立即执行一次 + dir: /work # 指定工作目录 + command: + - echo + - cron + ``` + +## 日志文件 + +`minit` 会把每个单元的日志记录在 `/var/log/minit` 文件夹内,使用环境变量 `MINIT_LOG_DIR` 来修改这个目录 + +设置 `MINIT_LOG_DIR=none` 禁用日志文件功能,同时缩减内存使用量,优化标准输出性能 + +## 日志字符集转换 + +上述所有配置单元,均可以追加 `charset` 字段,会将命令输出的日志,从其他字符集转义到 `utf-8` + +当前支持 + +* `gbk18030` +* `gbk` + +## 增加环境变量 + +在 `once`, `daemon` 和 `cron` 类型的单元中,可以使用 `env` 字段增加额外的环境变量 + +比如 + +```yaml +kind: daemon +name: demo-daemon-1 +env: + AAA: BBB +command: + - echo + - $AAA +``` + +## 渲染环境变量 + +凡是以 `MINIT_ENV_` 为前缀开头的环境变量,会执行模板渲染,并传递给进程,可使用函数参考 [pkg/mtmpl/funcs.go] 文件。 + +比如: + +``` +MINIT_ENV_MY_IP={{netResolveIP "google.com"}} +``` + +会设置对应的环境变量 + +``` +MY_IP=172.217.160.110 +``` + +## 使用 `Shell` + +上述配置单元的 `command` 数组默认状态下等价于 `argv` 系统调用,如果想要使用基于 `Shell` 的多行命令,使用以下方式 + +```yaml +name: demo-for-shell +kind: once +# 追加要使用的 shell +shell: "/bin/bash -eu" +command: + - if [ -n "${HELLO}" ]; then + - echo "world" + - fi +``` + +支持所有带 `command` 参数的工作单元类型,比如 `once`, `daemon`, `cron` + +## 快速创建单元 + +如果懒得写 `YAML` 文件,可以直接用环境变量,或者 `CMD` 来创建 `daemon` 类型的配置单元 + +**使用环境变量创建单元** + +``` +MINIT_MAIN=redis-server /etc/redis.conf +MINIT_MAIN_DIR=/work +MINIT_MAIN_NAME=main-program +MINIT_MAIN_GROUP=super-main +MINIT_MAIN_KIND=cron +MINIT_MAIN_CRON="* * * * *" +MINIT_MAIN_IMMEDIATE=true +MINIT_MAIN_CHARSET=gbk18030 +``` + +**使用命令行参数创建单元** + +``` +ENTRYPOINT ["/minit"] +CMD ["redis-server", "/etc/redis.conf"] +``` + +## 打开/关闭单元 + +可以通过环境变量,打开/关闭特定的单元 + +* `MINIT_ENABLE`, 逗号分隔, 如果值存在,则为 `白名单模式`,只有指定名称的单元会执行 +* `MINIT_DISABLE`, 逗号分隔, 如果值存在,则为 `黑名单模式`,除了指定名称外的单元会执行 + +可以为配置单元设置字段 `group`,然后在上述环境变量使用 `@group` ,设置一组单元的开启和关闭。 + +使用 `&daemon` 这样的格式,控制一个类型的控制单元的开启和关闭 + +没有设置 `group` 字段的单元,默认组名为 `default` + +## 快速退出 + +默认情况下,即便是没有 L3 类型任务 (`daemon`, `cron`, `logrotate` 
等),`minit` 也会持续运行,以支撑起容器主进程。 + +如果要在 `initContainers` 中,或者容器外使用 `minit`,可以将环境变量 `MINIT_QUICK_EXIT` 设置为 `true` + +此时,如果没有 L3 类型任务,`minit` 会自动退出 + +## 资源限制 (ulimit) + +**注意,使用此功能可能需要容器运行在高权限 (Privileged) 模式** + +使用环境变量 `MINIT_RLIMIT_XXXX` 来设置容器的资源限制,`unlimited` 代表无限制, `-` 表示不修改 + +比如: + +``` +MINIT_RLIMIT_NOFILE=unlimited # 同时设置软硬限制为 unlimited +MINIT_RLIMIT_NOFILE=128:unlimited # 设置软限制为 128,设置硬限制为 unlimited +MINIT_RLIMIT_NOFILE=128:- # 设置软限制为 128,硬限制不变 +MINIT_RLIMIT_NOFILE=-:unlimited # 软限制不变,硬限制修改为 unlimited +``` + +可用的环境变量有: + +``` +MINIT_RLIMIT_AS +MINIT_RLIMIT_CORE +MINIT_RLIMIT_CPU +MINIT_RLIMIT_DATA +MINIT_RLIMIT_FSIZE +MINIT_RLIMIT_LOCKS +MINIT_RLIMIT_MEMLOCK +MINIT_RLIMIT_MSGQUEUE +MINIT_RLIMIT_NICE +MINIT_RLIMIT_NOFILE +MINIT_RLIMIT_NPROC +MINIT_RLIMIT_RTPRIO +MINIT_RLIMIT_SIGPENDING +MINIT_RLIMIT_STACK +``` + +## 内核参数 (sysctl) + +**注意,使用此功能可能需要容器运行在高权限 (Privileged) 模式** + +使用环境变量 `MINIT_SYSCTL` 来写入 `sysctl` 配置项,`minit` 会自动写入 `/proc/sys` 目录下对应的参数 + +使用 `,` 分隔多个值 + +比如: + +``` +MINIT_SYSCTL=vm.max_map_count=262144,vm.swappiness=60 +``` + +## 透明大页 (THP) + +**注意,使用此功能可能需要容器运行在高权限 (Privileged) 模式,并且需要挂载 /sys 目录** + +使用环境变量 `MINIT_THP` 修改 透明大页配置,可选值为 `never`, `madvise` 和 `always` + +## WebDAV 服务 + +我懂你的痛,当你在容器里面生成了一份调试信息,比如 `Arthas` 或者 `Go pprof` 的火焰图,然后你开始绞尽脑汁想办法把这个文件传输出来 + +现在,不再需要这份痛苦了,`minit` 内置 `WebDAV` 服务,你可以像暴露一个标准服务一样暴露出来,省去了调度主机+映射主机目录等一堆烦心事 + +环境变量: + +* `MINIT_WEBDAV_ROOT` 指定要暴露的路径并启动 WebDAV 服务,比如 `/srv` +* `MINIT_WEBDAV_PORT` 指定 `WebDAV` 服务的端口,默认为 `7486` +* `MINIT_WEBDAV_USERNAME` 和 `MINIT_WEBDAV_PASSWORD` 指定 `WebDAV` 服务的用户密码,默认不设置用户密码 + +可以使用 Cyberduck 来连接 WebDAV 服务器 https://cyberduck.io/ + +## 展示自述文件 + +如果把一个文件放在 `/etc/banner.minit.txt` ,则 `minit` 在启动时会打印其内容 + +## 赞助 + +访问 + +## 许可证 + +GUO YANKE, MIT License diff --git a/agent/pkg/process-manager/menv/construct.go b/agent/pkg/process-manager/menv/construct.go new file mode 100644 index 0000000..143e875 --- /dev/null +++ b/agent/pkg/process-manager/menv/construct.go @@ -0,0 +1,44 @@ +package menv + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mtmpl" + "os" + "strings" +) + +const ( + PrefixMinitEnv = "MINIT_ENV_" +) + +// Construct create the env map with current system environ, extra and rendering MINIT_ENV_ prefixed keys +func Construct(extra map[string]string) (envs map[string]string, err error) { + envs = make(map[string]string) + // system env + for _, item := range os.Environ() { + splits := strings.SplitN(item, "=", 2) + var k, v string + if len(splits) > 0 { + k = splits[0] + if len(splits) > 1 { + v = splits[1] + } + envs[k] = v + } + } + // merge extra env + Merge(envs, extra) + // render MINIT_ENV_XXX + for k, v := range envs { + if !strings.HasPrefix(k, PrefixMinitEnv) { + continue + } + k = strings.TrimPrefix(k, PrefixMinitEnv) + var buf []byte + if buf, err = mtmpl.Execute(v, map[string]any{"Env": envs}); err != nil { + return + } + envs[k] = string(buf) + } + + return +} diff --git a/agent/pkg/process-manager/menv/construct_test.go b/agent/pkg/process-manager/menv/construct_test.go new file mode 100644 index 0000000..fc01111 --- /dev/null +++ b/agent/pkg/process-manager/menv/construct_test.go @@ -0,0 +1,16 @@ +package menv + +import ( + "github.com/stretchr/testify/require" + "testing" +) + +func TestBuild(t *testing.T) { + envs, err := Construct(map[string]string{ + "HOME-": "NONE", + "MINIT_ENV_BUF": "{{stringsToUpper \"bbb\"}}", + }) + require.NoError(t, err) + require.Equal(t, "", envs["HOME"]) + require.Equal(t, "BBB", envs["BUF"]) +} diff --git 
a/agent/pkg/process-manager/menv/merge.go b/agent/pkg/process-manager/menv/merge.go new file mode 100644 index 0000000..cb9b3aa --- /dev/null +++ b/agent/pkg/process-manager/menv/merge.go @@ -0,0 +1,15 @@ +package menv + +import "strings" + +// Merge merge two env map, if keys in src has a suffix '-', this will delete the key from dst +func Merge(dst map[string]string, src map[string]string) { + for k, v := range src { + if strings.HasSuffix(k, "-") { + delete(dst, k[:len(k)-1]) + } else { + dst[k] = v + } + } + return +} diff --git a/agent/pkg/process-manager/menv/merge_test.go b/agent/pkg/process-manager/menv/merge_test.go new file mode 100644 index 0000000..585eec3 --- /dev/null +++ b/agent/pkg/process-manager/menv/merge_test.go @@ -0,0 +1,22 @@ +package menv + +import ( + "github.com/stretchr/testify/require" + "testing" +) + +func TestMerge(t *testing.T) { + m := map[string]string{ + "a": "b", + "c": "d", + } + m2 := map[string]string{ + "a-": "", + "c": "e", + "h": "j", + } + Merge(m, m2) + require.Equal(t, 2, len(m)) + require.Equal(t, "e", m["c"]) + require.Equal(t, "j", m["h"]) +} diff --git a/agent/pkg/process-manager/merrs/errors.go b/agent/pkg/process-manager/merrs/errors.go new file mode 100644 index 0000000..653242b --- /dev/null +++ b/agent/pkg/process-manager/merrs/errors.go @@ -0,0 +1,74 @@ +package merrs + +import ( + "strconv" + "strings" + "sync" +) + +type Errors []error + +func (errs Errors) Error() string { + sb := &strings.Builder{} + for i, err := range errs { + if err == nil { + continue + } + if sb.Len() > 0 { + sb.WriteString("; ") + } + sb.WriteRune('#') + sb.WriteString(strconv.Itoa(i)) + sb.WriteString(": ") + sb.WriteString(err.Error()) + } + return sb.String() +} + +type ErrorGroup interface { + Add(err error) + Set(i int, err error) + Unwrap() error +} + +type errorGroup struct { + errors Errors + locker *sync.RWMutex +} + +func NewErrorGroup() ErrorGroup { + return &errorGroup{ + locker: &sync.RWMutex{}, + } +} + +func (eg *errorGroup) Add(err error) { + eg.locker.Lock() + defer eg.locker.Unlock() + + eg.errors = append(eg.errors, err) +} + +func (eg *errorGroup) Set(i int, err error) { + eg.locker.Lock() + defer eg.locker.Unlock() + + if i >= len(eg.errors) { + eg.errors = append(eg.errors, make([]error, i+1-len(eg.errors))...) 
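+			// (the append above pads the slice with nil entries so that index i is addressable)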
+ } + + eg.errors[i] = err +} + +func (eg *errorGroup) Unwrap() error { + eg.locker.RLock() + defer eg.locker.RUnlock() + + for _, err := range eg.errors { + if err != nil { + return eg.errors + } + } + + return nil +} diff --git a/agent/pkg/process-manager/merrs/errors_test.go b/agent/pkg/process-manager/merrs/errors_test.go new file mode 100644 index 0000000..36b193c --- /dev/null +++ b/agent/pkg/process-manager/merrs/errors_test.go @@ -0,0 +1,30 @@ +package merrs + +import ( + "errors" + "github.com/stretchr/testify/require" + "testing" +) + +func TestNewErrorGroup(t *testing.T) { + eg := NewErrorGroup() + eg.Add(errors.New("hello")) + eg.Add(nil) + eg.Add(errors.New("world")) + require.Equal(t, "#0: hello; #2: world", eg.Unwrap().Error()) + + eg = NewErrorGroup() + eg.Add(nil) + eg.Add(nil) + require.NoError(t, eg.Unwrap()) + + eg.Set(3, errors.New("BBB")) + require.Error(t, eg.Unwrap()) + + errs := eg.Unwrap().(Errors) + require.Equal(t, 4, len(errs)) + require.NoError(t, errs[0]) + require.NoError(t, errs[1]) + require.NoError(t, errs[2]) + require.Error(t, errs[3]) +} diff --git a/agent/pkg/process-manager/mexec/manager.go b/agent/pkg/process-manager/mexec/manager.go new file mode 100644 index 0000000..1661e12 --- /dev/null +++ b/agent/pkg/process-manager/mexec/manager.go @@ -0,0 +1,182 @@ +//go:build linux + +package mexec + +import ( + "errors" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/menv" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mlog" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/shellquote" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/simplifiedchinese" + "io" + "os" + "os/exec" + "strings" + "sync" + "syscall" +) + +type ExecuteOptions struct { + Name string + + Dir string + Shell string + Env map[string]string + Command []string + Charset string + + Logger mlog.ProcLogger + IgnoreExecError bool +} + +type Manager interface { + Signal(sig os.Signal) + Execute(opts ExecuteOptions) (err error) +} + +type manager struct { + childPIDs map[int]struct{} + childPIDLock sync.Locker + charsets map[string]encoding.Encoding +} + +func NewManager() Manager { + return &manager{ + childPIDs: map[int]struct{}{}, + childPIDLock: &sync.Mutex{}, + charsets: map[string]encoding.Encoding{ + "gb18030": simplifiedchinese.GB18030, + "gbk": simplifiedchinese.GBK, + }, + } +} + +func (m *manager) addChildPID(fn func() (pid int, err error)) error { + m.childPIDLock.Lock() + defer m.childPIDLock.Unlock() + pid, err := fn() + if err == nil { + m.childPIDs[pid] = struct{}{} + } + return err +} + +func (m *manager) delChildPID(pid int) { + m.childPIDLock.Lock() + defer m.childPIDLock.Unlock() + delete(m.childPIDs, pid) +} + +func (m *manager) Signal(sig os.Signal) { + m.childPIDLock.Lock() + defer m.childPIDLock.Unlock() + for pid := range m.childPIDs { + if process, _ := os.FindProcess(pid); process != nil { + _ = process.Signal(sig) + } + } +} + +func (m *manager) Execute(opts ExecuteOptions) (err error) { + var argv []string + + // check opts.Dir + if opts.Dir != "" { + var info os.FileInfo + if info, err = os.Stat(opts.Dir); err != nil { + err = errors.New("failed to stat opts.Dir: " + err.Error()) + return + } + if !info.IsDir() { + err = errors.New("opts.Dir is not a directory: " + opts.Dir) + return + } + } + + // build env + var env map[string]string + if env, err = menv.Construct(opts.Env); err != nil { + err = errors.New("failed constructing environment variables: " + err.Error()) + return + } + + // 
build argv + if opts.Shell != "" { + if argv, err = shellquote.Split(opts.Shell); err != nil { + err = errors.New("opts.Shell is invalid: " + err.Error()) + return + } + } else { + for _, arg := range opts.Command { + argv = append(argv, os.Expand(arg, func(s string) string { + return env[s] + })) + } + } + + // build exec.Cmd + var outPipe, errPipe io.Reader + cmd := exec.Command(argv[0], argv[1:]...) + if opts.Shell != "" { + cmd.Stdin = strings.NewReader(strings.Join(opts.Command, "\n")) + } + for k, v := range env { + cmd.Env = append(cmd.Env, k+"="+v) + } + cmd.Dir = opts.Dir + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + + // build out / err pipe + if outPipe, err = cmd.StdoutPipe(); err != nil { + return + } + if errPipe, err = cmd.StderrPipe(); err != nil { + return + } + + // charset + if opts.Charset != "" { + enc := m.charsets[strings.ToLower(opts.Charset)] + if enc == nil { + opts.Logger.Error("unknown charset:", opts.Charset) + } else { + outPipe = enc.NewDecoder().Reader(outPipe) + errPipe = enc.NewDecoder().Reader(errPipe) + } + } + + // start process in the same lock with signal children + if err = m.addChildPID(func() (pid int, err error) { + if err = cmd.Start(); err != nil { + return + } + pid = cmd.Process.Pid + return + }); err != nil { + return + } + + opts.Logger.Print("minit: " + opts.Name + ": process started") + + // streaming + go opts.Logger.Out().ReadFrom(outPipe) + go opts.Logger.Err().ReadFrom(errPipe) + + // wait for process + if err = cmd.Wait(); err != nil { + opts.Logger.Error("minit: " + opts.Name + ": process exited with error: " + err.Error()) + + if opts.IgnoreExecError { + err = nil + } + } else { + opts.Logger.Print("minit: " + opts.Name + ": process exited") + } + + m.delChildPID(cmd.Process.Pid) + + return +} diff --git a/agent/pkg/process-manager/mexec/manager_test.go b/agent/pkg/process-manager/mexec/manager_test.go new file mode 100644 index 0000000..f99f9f6 --- /dev/null +++ b/agent/pkg/process-manager/mexec/manager_test.go @@ -0,0 +1,64 @@ +package mexec + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mlog" + "github.com/stretchr/testify/require" + "os" + "path/filepath" + "syscall" + "testing" + "time" +) + +func TestNewManager(t *testing.T) { + m := NewManager() + + os.RemoveAll(filepath.Join("testdata", "test.out.log")) + os.RemoveAll(filepath.Join("testdata", "test.err.log")) + + logger, err := mlog.NewProcLogger(mlog.ProcLoggerOptions{ + FileOptions: &mlog.RotatingFileOptions{ + Dir: "testdata", + Filename: "test", + }, + }) + require.NoError(t, err) + + err = m.Execute(ExecuteOptions{ + Dir: "testdata", + Env: map[string]string{ + "AAA": "BBB", + }, + Command: []string{ + "echo", "$AAA", + }, + Logger: logger, + IgnoreExecError: true, + }) + require.NoError(t, err) + + buf, err := os.ReadFile(filepath.Join("testdata", "test.out.log")) + require.Contains(t, string(buf), "BBB") + + go func() { + time.Sleep(time.Second) + m.Signal(syscall.SIGINT) + }() + + t1 := time.Now() + + err = m.Execute(ExecuteOptions{ + Dir: "testdata", + Env: map[string]string{ + "AAA": "10", + }, + Command: []string{ + "sleep", "$AAA", + }, + Logger: logger, + IgnoreExecError: true, + }) + require.NoError(t, err) + + require.True(t, time.Now().Sub(t1) < time.Second*2) +} diff --git a/agent/pkg/process-manager/mexec/manager_windows.go b/agent/pkg/process-manager/mexec/manager_windows.go new file mode 100644 index 0000000..b8e6af8 --- /dev/null +++ b/agent/pkg/process-manager/mexec/manager_windows.go @@ -0,0 +1,182 
@@ +//go:build windows + +package mexec + +import ( + "errors" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/menv" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mlog" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/shellquote" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/simplifiedchinese" + "io" + "log" + "os" + "os/exec" + "strings" + "sync" + "syscall" +) + +type ExecuteOptions struct { + Name string + + Dir string + Shell string + Env map[string]string + Command []string + Charset string + + Logger mlog.ProcLogger + IgnoreExecError bool +} + +type Manager interface { + Signal(sig os.Signal) + Execute(opts ExecuteOptions) (err error) +} + +type manager struct { + childPIDs map[int]struct{} + childPIDLock sync.Locker + charsets map[string]encoding.Encoding +} + +func NewManager() Manager { + return &manager{ + childPIDs: map[int]struct{}{}, + childPIDLock: &sync.Mutex{}, + charsets: map[string]encoding.Encoding{ + "gb18030": simplifiedchinese.GB18030, + "gbk": simplifiedchinese.GBK, + }, + } +} + +func (m *manager) addChildPID(fn func() (pid int, err error)) error { + m.childPIDLock.Lock() + defer m.childPIDLock.Unlock() + pid, err := fn() + if err == nil { + m.childPIDs[pid] = struct{}{} + } + return err +} + +func (m *manager) delChildPID(pid int) { + m.childPIDLock.Lock() + defer m.childPIDLock.Unlock() + delete(m.childPIDs, pid) +} + +func (m *manager) Signal(sig os.Signal) { + m.childPIDLock.Lock() + defer m.childPIDLock.Unlock() + for pid := range m.childPIDs { + if process, _ := os.FindProcess(pid); process != nil { + log.Printf("关闭各采集器:%d", process.Pid) + _ = process.Signal(sig) + } + } +} + +func (m *manager) Execute(opts ExecuteOptions) (err error) { + var argv []string + + // check opts.Dir + if opts.Dir != "" { + var info os.FileInfo + if info, err = os.Stat(opts.Dir); err != nil { + err = errors.New("failed to stat opts.Dir: " + err.Error()) + return + } + if !info.IsDir() { + err = errors.New("opts.Dir is not a directory: " + opts.Dir) + return + } + } + + // build env + var env map[string]string + if env, err = menv.Construct(opts.Env); err != nil { + err = errors.New("failed constructing environment variables: " + err.Error()) + return + } + + // build argv + if opts.Shell != "" { + if argv, err = shellquote.Split(opts.Shell); err != nil { + err = errors.New("opts.Shell is invalid: " + err.Error()) + return + } + } else { + for _, arg := range opts.Command { + argv = append(argv, os.Expand(arg, func(s string) string { + return env[s] + })) + } + } + + // build exec.Cmd + var outPipe, errPipe io.Reader + cmd := exec.Command(argv[0], argv[1:]...) 
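+	// argv either comes from splitting opts.Shell (shell mode) or from opts.Command with $VAR
+	// references expanded against the constructed env; in shell mode the unit's command lines
+	// are fed to the shell via stdin below.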
+ if opts.Shell != "" { + cmd.Stdin = strings.NewReader(strings.Join(opts.Command, "\n")) + } + for k, v := range env { + cmd.Env = append(cmd.Env, k+"="+v) + } + cmd.Dir = opts.Dir + cmd.SysProcAttr = &syscall.SysProcAttr{} + + // build out / err pipe + if outPipe, err = cmd.StdoutPipe(); err != nil { + return + } + if errPipe, err = cmd.StderrPipe(); err != nil { + return + } + + // charset + if opts.Charset != "" { + enc := m.charsets[strings.ToLower(opts.Charset)] + if enc == nil { + opts.Logger.Error("unknown charset:", opts.Charset) + } else { + outPipe = enc.NewDecoder().Reader(outPipe) + errPipe = enc.NewDecoder().Reader(errPipe) + } + } + + // start process in the same lock with signal children + if err = m.addChildPID(func() (pid int, err error) { + if err = cmd.Start(); err != nil { + return + } + pid = cmd.Process.Pid + return + }); err != nil { + return + } + + opts.Logger.Print("minit: " + opts.Name + ": process started") + + // streaming + go opts.Logger.Out().ReadFrom(outPipe) + go opts.Logger.Err().ReadFrom(errPipe) + + // wait for process + if err = cmd.Wait(); err != nil { + opts.Logger.Error("minit: " + opts.Name + ": process exited with error: " + err.Error()) + + if opts.IgnoreExecError { + err = nil + } + } else { + opts.Logger.Print("minit: " + opts.Name + ": process exited") + } + + m.delChildPID(cmd.Process.Pid) + + return +} diff --git a/agent/pkg/process-manager/mexec/testdata/.gitignore b/agent/pkg/process-manager/mexec/testdata/.gitignore new file mode 100644 index 0000000..bf0824e --- /dev/null +++ b/agent/pkg/process-manager/mexec/testdata/.gitignore @@ -0,0 +1 @@ +*.log \ No newline at end of file diff --git a/agent/pkg/process-manager/mlog/logger.go b/agent/pkg/process-manager/mlog/logger.go new file mode 100644 index 0000000..47e4e65 --- /dev/null +++ b/agent/pkg/process-manager/mlog/logger.go @@ -0,0 +1,118 @@ +package mlog + +import ( + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/merrs" + "io" + "os" +) + +type ProcLoggerOptions struct { + ConsoleOut io.Writer + ConsoleErr io.Writer + ConsolePrefix string + + FilePrefix string + FileOptions *RotatingFileOptions +} + +type ProcLogger interface { + Print(items ...interface{}) + Printf(layout string, items ...interface{}) + Error(items ...interface{}) + Errorf(layout string, items ...interface{}) + + ProcOutput +} + +type procLogger struct { + out Output + err Output +} + +func NewProcLogger(opts ProcLoggerOptions) (pl ProcLogger, err error) { + if opts.ConsoleOut == nil { + opts.ConsoleOut = os.Stdout + } + if opts.ConsoleErr == nil { + opts.ConsoleErr = os.Stderr + } + + if opts.FileOptions == nil { + pl = &procLogger{ + out: NewWriterOutput(opts.ConsoleOut, []byte(opts.ConsolePrefix), nil), + err: NewWriterOutput(opts.ConsoleErr, []byte(opts.ConsolePrefix), nil), + } + return + } + + if opts.FileOptions.MaxFileSize == 0 { + opts.FileOptions.MaxFileSize = 128 * 1024 * 1024 + } + if opts.FileOptions.MaxFileCount == 0 { + opts.FileOptions.MaxFileCount = 5 + } + + var fileOut io.WriteCloser + if fileOut, err = NewRotatingFile(RotatingFileOptions{ + Dir: opts.FileOptions.Dir, + Filename: opts.FileOptions.Filename + ".out", + MaxFileSize: opts.FileOptions.MaxFileSize, + MaxFileCount: opts.FileOptions.MaxFileCount, + }); err != nil { + return + } + + var fileErr io.WriteCloser + if fileErr, err = NewRotatingFile(RotatingFileOptions{ + Dir: opts.FileOptions.Dir, + Filename: opts.FileOptions.Filename + ".err", + MaxFileSize: opts.FileOptions.MaxFileSize, + MaxFileCount: 
opts.FileOptions.MaxFileCount, + }); err != nil { + return + } + + pl = &procLogger{ + out: MultiOutput( + NewWriterOutput(fileOut, []byte(opts.FilePrefix), nil), + NewWriterOutput(opts.ConsoleOut, []byte(opts.ConsolePrefix), nil), + ), + err: MultiOutput( + NewWriterOutput(fileErr, []byte(opts.FilePrefix), nil), + NewWriterOutput(opts.ConsoleErr, []byte(opts.ConsolePrefix), nil), + ), + } + return +} + +func (pl *procLogger) Close() error { + eg := merrs.NewErrorGroup() + eg.Add(pl.out.Close()) + eg.Add(pl.err.Close()) + return eg.Unwrap() +} + +func (pl *procLogger) Print(items ...interface{}) { + _, _ = pl.out.Write(append([]byte(fmt.Sprint(items...)), '\n')) +} + +func (pl *procLogger) Error(items ...interface{}) { + _, _ = pl.err.Write(append([]byte(fmt.Sprint(items...)), '\n')) +} + +func (pl *procLogger) Printf(pattern string, items ...interface{}) { + _, _ = pl.out.Write(append([]byte(fmt.Sprintf(pattern, items...)), '\n')) +} + +func (pl *procLogger) Errorf(pattern string, items ...interface{}) { + _, _ = pl.err.Write(append([]byte(fmt.Sprintf(pattern, items...)), '\n')) +} + +func (pl *procLogger) Out() Output { + return pl.out +} + +func (pl *procLogger) Err() Output { + return pl.err +} diff --git a/agent/pkg/process-manager/mlog/logger_test.go b/agent/pkg/process-manager/mlog/logger_test.go new file mode 100644 index 0000000..c0b7700 --- /dev/null +++ b/agent/pkg/process-manager/mlog/logger_test.go @@ -0,0 +1,25 @@ +package mlog + +import ( + "github.com/stretchr/testify/require" + "os" + "path/filepath" + "testing" +) + +func TestLog(t *testing.T) { + os.MkdirAll(filepath.Join("testdata", "logger"), 0755) + os.WriteFile(filepath.Join("testdata", "logger", ".gitignore"), []byte("*.log"), 0644) + log, err := NewProcLogger(ProcLoggerOptions{ + FileOptions: &RotatingFileOptions{ + Dir: filepath.Join("testdata", "logger"), + Filename: "test", + }, + ConsolePrefix: "test", + }) + require.NoError(t, err) + log.Print("hello", "world") + log.Printf("hello, %s", "world") + log.Error("error", "world") + log.Errorf("error, %s", "world") +} diff --git a/agent/pkg/process-manager/mlog/output.go b/agent/pkg/process-manager/mlog/output.go new file mode 100644 index 0000000..5840854 --- /dev/null +++ b/agent/pkg/process-manager/mlog/output.go @@ -0,0 +1,154 @@ +package mlog + +import ( + "bufio" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/merrs" + "io" + "sync" +) + +// Output interface for single stream log output +type Output interface { + // WriteCloser is for single line writing + io.WriteCloser + + // ReaderFrom is for streaming + io.ReaderFrom +} + +// ProcOutput interface for process +type ProcOutput interface { + // Out stdout + Out() Output + // Err stderr + Err() Output +} + +type writerOutput struct { + pfx []byte + sfx []byte + w io.Writer +} + +func (w *writerOutput) Write(p []byte) (n int, err error) { + if len(w.pfx) == 0 && len(w.sfx) == 0 { + n, err = w.w.Write(p) + return + } + if n, err = w.w.Write( + append( + append(w.pfx, p...), + w.sfx..., + ), + ); err != nil { + return + } + + n = len(p) + + return +} + +func (w *writerOutput) Close() error { + if c, ok := w.w.(io.Closer); ok { + return c.Close() + } + return nil +} + +func (w *writerOutput) ReadFrom(r io.Reader) (n int64, err error) { + br := bufio.NewReader(r) + for { + var line []byte + if line, err = br.ReadBytes('\n'); err == nil { + _, _ = w.Write(line) + n += int64(len(line)) + } else { + if err == io.EOF { + err = nil + } + if len(line) != 0 { + _, _ = w.Write(append(line, '\n')) + n 
+= int64(len(line)) + } + break + } + } + return +} + +// NewWriterOutput wrap a writer as a Output, with optional line Prefix and Suffix +func NewWriterOutput(w io.Writer, pfx, sfx []byte) Output { + return &writerOutput{w: w, pfx: pfx, sfx: sfx} +} + +type multiOutput struct { + outputs []Output +} + +// MultiOutput create a new Output for proc logging +func MultiOutput(outputs ...Output) Output { + return &multiOutput{outputs: outputs} +} + +func (pc *multiOutput) Close() error { + eg := merrs.NewErrorGroup() + for _, output := range pc.outputs { + eg.Add(output.Close()) + } + return eg.Unwrap() +} + +// Write this method is used to write a single line of log +func (pc *multiOutput) Write(buf []byte) (n int, err error) { + for _, output := range pc.outputs { + if n, err = output.Write(buf); err != nil { + return + } + } + n = len(buf) + return +} + +// ReadFrom implements ReaderFrom +func (pc *multiOutput) ReadFrom(r io.Reader) (n int64, err error) { + eg := merrs.NewErrorGroup() + wg := &sync.WaitGroup{} + + var ( + cs []io.Closer + ws []io.Writer + ) + + for _, _out := range pc.outputs { + out := _out + + childR, childW := io.Pipe() + cs, ws = append(cs, childW), append(ws, childW) + + wg.Add(1) + go func() { + defer wg.Done() + _, err := out.ReadFrom(childR) + if err == io.EOF { + err = nil + } + eg.Add(err) + }() + } + + _, err = io.Copy(io.MultiWriter(ws...), r) + if err == io.EOF { + err = nil + } + for _, c := range cs { + _ = c.Close() + } + + wg.Wait() + + if err == nil { + err = eg.Unwrap() + } + return +} diff --git a/agent/pkg/process-manager/mlog/output_test.go b/agent/pkg/process-manager/mlog/output_test.go new file mode 100644 index 0000000..d34cb75 --- /dev/null +++ b/agent/pkg/process-manager/mlog/output_test.go @@ -0,0 +1,39 @@ +package mlog + +import ( + "bytes" + "github.com/stretchr/testify/require" + "testing" +) + +func TestNewWriterOutput(t *testing.T) { + buf := &bytes.Buffer{} + + o := NewWriterOutput(buf, []byte("a"), []byte("b")) + + _, err := o.Write([]byte("hello\n")) + require.NoError(t, err) + + _, err = o.ReadFrom(bytes.NewReader([]byte("hello\nworld"))) + require.NoError(t, err) + + require.Equal(t, "ahello\nbahello\nbaworld\nb", buf.String()) +} + +func TestMultiOutput(t *testing.T) { + buf1 := &bytes.Buffer{} + o1 := NewWriterOutput(buf1, []byte("a"), []byte("b")) + buf2 := &bytes.Buffer{} + o2 := NewWriterOutput(buf2, []byte("c"), []byte("d")) + + o := MultiOutput(o1, o2) + + _, err := o.Write([]byte("hello\n")) + require.NoError(t, err) + + _, err = o.ReadFrom(bytes.NewReader([]byte("hello\nworld"))) + require.NoError(t, err) + + require.Equal(t, "ahello\nbahello\nbaworld\nb", buf1.String()) + require.Equal(t, "chello\ndchello\ndcworld\nd", buf2.String()) +} diff --git a/agent/pkg/process-manager/mlog/rotating.go b/agent/pkg/process-manager/mlog/rotating.go new file mode 100644 index 0000000..580296f --- /dev/null +++ b/agent/pkg/process-manager/mlog/rotating.go @@ -0,0 +1,165 @@ +package mlog + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" +) + +type rotatingFile struct { + opts RotatingFileOptions + + fd *os.File + size int64 + lock sync.Locker +} + +// RotatingFileOptions options for creating a RotatingFile +type RotatingFileOptions struct { + // Dir directory + Dir string + // Filename filename prefix + Filename string + // MaxFileSize max size of a single file, default to 128mb + MaxFileSize int64 + // MaxFileCount max count of rotated files + MaxFileCount int64 +} + +// NewRotatingFile 
create a new io.WriteCloser as a rotating log file +func NewRotatingFile(opts RotatingFileOptions) (w io.WriteCloser, err error) { + if opts.MaxFileSize == 0 { + opts.MaxFileSize = 128 * 1000 * 1000 + } + rf := &rotatingFile{opts: opts, lock: &sync.Mutex{}} + if err = rf.open(); err != nil { + return + } + w = rf + return +} + +func (rf *rotatingFile) currentPath() string { + return filepath.Join(rf.opts.Dir, rf.opts.Filename+".log") +} + +func (rf *rotatingFile) rotatedPath(id int64) string { + return filepath.Join(rf.opts.Dir, fmt.Sprintf("%s.%d.log", rf.opts.Filename, id)) +} + +func (rf *rotatingFile) nextRotatedID() (id int64, err error) { + var entries []os.DirEntry + if entries, err = os.ReadDir(rf.opts.Dir); err != nil { + return + } + + for _, entry := range entries { + name := entry.Name() + if strings.HasPrefix(name, rf.opts.Filename+".") && + strings.HasSuffix(name, ".log") { + eIDStr := strings.TrimSuffix(strings.TrimPrefix(name, rf.opts.Filename+"."), ".log") + eID, _ := strconv.ParseInt(eIDStr, 10, 64) + if eID > id { + id = eID + } + } + } + + id += 1 + + // if id exceeded MaxFileCount, back to 1 + if rf.opts.MaxFileCount > 0 && id > rf.opts.MaxFileCount { + id = 1 + } + return +} + +func (rf *rotatingFile) open() (err error) { + var fd *os.File + if fd, err = os.OpenFile( + rf.currentPath(), + os.O_WRONLY|os.O_CREATE|os.O_APPEND, + 0644, + ); err != nil { + return + } + + var info os.FileInfo + if info, err = fd.Stat(); err != nil { + _ = fd.Close() + return + } + + existed := rf.fd + + rf.fd = fd + rf.size = info.Size() + + if existed != nil { + _ = existed.Close() + } + + return +} + +func (rf *rotatingFile) reallocate() (err error) { + rf.lock.Lock() + defer rf.lock.Unlock() + + // recheck, in case of race condition + if atomic.LoadInt64(&rf.size) <= rf.opts.MaxFileSize { + return + } + + // find next rotated id + var id int64 + if id, err = rf.nextRotatedID(); err != nil { + return + } + + // try remove existed, in case id looped due to maxCount + _ = os.Remove(rf.rotatedPath(id)) + + // remove current file to rotated path + if err = os.Rename(rf.currentPath(), rf.rotatedPath(id)); err != nil { + return + } + + // open current file, this will close existing file + if err = rf.open(); err != nil { + return + } + + return nil +} + +func (rf *rotatingFile) Write(p []byte) (n int, err error) { + if n, err = rf.fd.Write(p); err != nil { + return + } + + // reallocate if exceeded + if atomic.AddInt64(&rf.size, int64(n)) > rf.opts.MaxFileSize { + if err = rf.reallocate(); err != nil { + return + } + } + + return +} + +func (rf *rotatingFile) Close() (err error) { + rf.lock.Lock() + defer rf.lock.Unlock() + + if rf.fd != nil { + err = rf.fd.Close() + rf.fd = nil + } + return +} diff --git a/agent/pkg/process-manager/mlog/rotating_test.go b/agent/pkg/process-manager/mlog/rotating_test.go new file mode 100644 index 0000000..4200265 --- /dev/null +++ b/agent/pkg/process-manager/mlog/rotating_test.go @@ -0,0 +1,43 @@ +package mlog + +import ( + "github.com/stretchr/testify/require" + "os" + "path/filepath" + "testing" +) + +func TestNewRotatingFile(t *testing.T) { + _ = os.RemoveAll(filepath.Join("testdata", "logs")) + _ = os.MkdirAll(filepath.Join("testdata", "logs"), 0755) + _ = os.WriteFile(filepath.Join("testdata", "logs", ".gitignore"), []byte("*.log"), 0644) + f, err := NewRotatingFile(RotatingFileOptions{ + Dir: filepath.Join("testdata", "logs"), + Filename: "test", + MaxFileSize: 10, + }) + require.NoError(t, err) + _, err = f.Write([]byte("hello, world, hello, world, 
hello, world")) + require.NoError(t, err) + _, err = f.Write([]byte("hello, world, hello, world, hello, world")) + require.NoError(t, err) + _, err = f.Write([]byte("hello, world, hello, world, hello, world")) + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + f, err = NewRotatingFile(RotatingFileOptions{ + Dir: filepath.Join("testdata", "logs"), + Filename: "test-maxcount", + MaxFileSize: 10, + MaxFileCount: 2, + }) + require.NoError(t, err) + _, err = f.Write([]byte("hello, world, hello, world, hello, world")) + require.NoError(t, err) + _, err = f.Write([]byte("hello, world, hello, world, hello, world")) + require.NoError(t, err) + _, err = f.Write([]byte("hello, world, hello, world, hello, world")) + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) +} diff --git a/agent/pkg/process-manager/mlog/testdata/logger/.gitignore b/agent/pkg/process-manager/mlog/testdata/logger/.gitignore new file mode 100644 index 0000000..bf0824e --- /dev/null +++ b/agent/pkg/process-manager/mlog/testdata/logger/.gitignore @@ -0,0 +1 @@ +*.log \ No newline at end of file diff --git a/agent/pkg/process-manager/mlog/testdata/logs/.gitignore b/agent/pkg/process-manager/mlog/testdata/logs/.gitignore new file mode 100644 index 0000000..bf0824e --- /dev/null +++ b/agent/pkg/process-manager/mlog/testdata/logs/.gitignore @@ -0,0 +1 @@ +*.log \ No newline at end of file diff --git a/agent/pkg/process-manager/mlog/writer.go b/agent/pkg/process-manager/mlog/writer.go new file mode 100644 index 0000000..4db7e6f --- /dev/null +++ b/agent/pkg/process-manager/mlog/writer.go @@ -0,0 +1,80 @@ +package mlog + +import ( + "bytes" + "io" + "log" + "sync" +) + +type loggerWriter struct { + logger *log.Logger + buf *bytes.Buffer + pfx string + + lock sync.Locker +} + +// NewLoggerWriter create a new io.WriteCloser that append each line to log.procLogger +func NewLoggerWriter(logger *log.Logger, prefix string) io.WriteCloser { + return &loggerWriter{ + logger: logger, + buf: &bytes.Buffer{}, + pfx: prefix, + + lock: &sync.Mutex{}, + } +} + +func (w *loggerWriter) finish(force bool) (err error) { + var line string + + for { + // read till new line + if line, err = w.buf.ReadString('\n'); err == nil { + // output + if err = w.logger.Output(3, w.pfx+line); err != nil { + return + } + } else { + if force { + // if forced, output to logger + if err = w.logger.Output(3, w.pfx+line); err != nil { + return + } + } else { + // write back + if _, err = w.buf.WriteString(line); err != nil { + return + } + } + break + } + } + + return +} + +func (w *loggerWriter) Close() (err error) { + w.lock.Lock() + defer w.lock.Unlock() + + if err = w.finish(true); err != nil { + return + } + return +} + +func (w *loggerWriter) Write(p []byte) (n int, err error) { + w.lock.Lock() + defer w.lock.Unlock() + + if n, err = w.buf.Write(p); err != nil { + return + } + + if err = w.finish(false); err != nil { + return + } + return +} diff --git a/agent/pkg/process-manager/mlog/writer_test.go b/agent/pkg/process-manager/mlog/writer_test.go new file mode 100644 index 0000000..42292f8 --- /dev/null +++ b/agent/pkg/process-manager/mlog/writer_test.go @@ -0,0 +1,19 @@ +package mlog + +import ( + "bytes" + "github.com/stretchr/testify/require" + "log" + "testing" +) + +func TestLoggerWriter(t *testing.T) { + out := &bytes.Buffer{} + l := log.New(out, "aaa", log.Lshortfile) + w := NewLoggerWriter(l, "bbb ") + _, err := w.Write([]byte("hello,world\nbbb")) + require.NoError(t, err) + err = w.Close() + require.NoError(t, err) + 
require.Equal(t, "aaawriter_test.go:14: bbb hello,world\naaawriter_test.go:16: bbb bbb\n", out.String()) +} diff --git a/agent/pkg/process-manager/mrunners/runner.go b/agent/pkg/process-manager/mrunners/runner.go new file mode 100644 index 0000000..9fb11d3 --- /dev/null +++ b/agent/pkg/process-manager/mrunners/runner.go @@ -0,0 +1,59 @@ +package mrunners + +import ( + "context" + "errors" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mexec" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mlog" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/munit" + "sync" +) + +type RunnerAction interface { + Do(ctx context.Context) +} + +type Runner struct { + Order int + Long bool + Action RunnerAction +} + +var ( + factories = map[string]RunnerFactory{} + factoriesLock sync.Locker = &sync.Mutex{} +) + +type RunnerOptions struct { + Unit munit.Unit + Exec mexec.Manager + Logger mlog.ProcLogger +} + +func (ro RunnerOptions) Print(message string) { + ro.Logger.Print("minit: " + ro.Unit.Kind + "/" + ro.Unit.Name + ": " + message) +} + +func (ro RunnerOptions) Error(message string) { + ro.Logger.Error("minit: " + ro.Unit.Kind + "/" + ro.Unit.Name + ": " + message) +} + +type RunnerFactory = func(opts RunnerOptions) (Runner, error) + +func Register(name string, factory RunnerFactory) { + factoriesLock.Lock() + defer factoriesLock.Unlock() + + factories[name] = factory +} + +func Create(opts RunnerOptions) (Runner, error) { + factoriesLock.Lock() + defer factoriesLock.Unlock() + + if fac, ok := factories[opts.Unit.Kind]; ok { + return fac(opts) + } else { + return Runner{}, errors.New("unknown runner kind: " + opts.Unit.Kind) + } +} diff --git a/agent/pkg/process-manager/mrunners/runner_cron.go b/agent/pkg/process-manager/mrunners/runner_cron.go new file mode 100644 index 0000000..8cfc278 --- /dev/null +++ b/agent/pkg/process-manager/mrunners/runner_cron.go @@ -0,0 +1,58 @@ +package mrunners + +import ( + "context" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/munit" + "github.com/robfig/cron/v3" +) + +func init() { + Register(munit.KindCron, func(opts RunnerOptions) (runner Runner, err error) { + if err = opts.Unit.RequireCommand(); err != nil { + return + } + if err = opts.Unit.RequireCron(); err != nil { + return + } + if _, err = cron.ParseStandard(opts.Unit.Cron); err != nil { + return + } + + runner.Order = 30 + runner.Long = true + runner.Action = &runnerCron{RunnerOptions: opts} + return + }) +} + +type runnerCron struct { + RunnerOptions +} + +func (r *runnerCron) Do(ctx context.Context) { + r.Print("controller started") + defer r.Print("controller exited") + + if r.Unit.Immediate { + if err := r.Exec.Execute(r.Unit.ExecuteOptions(r.Logger)); err != nil { + r.Error("failed executing: " + err.Error()) + } + } + + cr := cron.New(cron.WithLogger(cron.PrintfLogger(r.Logger))) + _, err := cr.AddFunc(r.Unit.Cron, func() { + r.Print("triggered") + if err := r.Exec.Execute(r.Unit.ExecuteOptions(r.Logger)); err != nil { + r.Error("failed executing: " + err.Error()) + } + }) + + if err != nil { + panic(err) + } + + cr.Start() + + <-ctx.Done() + <-cr.Stop().Done() +} diff --git a/agent/pkg/process-manager/mrunners/runner_daemon.go b/agent/pkg/process-manager/mrunners/runner_daemon.go new file mode 100644 index 0000000..1a67483 --- /dev/null +++ b/agent/pkg/process-manager/mrunners/runner_daemon.go @@ -0,0 +1,57 @@ +package mrunners + +import ( + "context" + "fmt" + 
"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/munit" + "time" +) + +func init() { + Register(munit.KindDaemon, func(opts RunnerOptions) (runner Runner, err error) { + if err = opts.Unit.RequireCommand(); err != nil { + return + } + + runner.Order = 40 + runner.Long = true + runner.Action = &runnerDaemon{RunnerOptions: opts} + return + }) +} + +type runnerDaemon struct { + RunnerOptions +} + +func (r *runnerDaemon) Do(ctx context.Context) { + r.Print("controller started") + r.Print(fmt.Sprintf("启动%s....", r.Unit.Name)) + defer r.Print("controller exited") + +forLoop: + for { + if ctx.Err() != nil { + break forLoop + } + + var err error + if err = r.Exec.Execute(r.Unit.ExecuteOptions(r.Logger)); err != nil { + r.Error("failed executing:" + err.Error()) + } + + if ctx.Err() != nil { + break forLoop + } + + r.Print("restarting") + + timer := time.NewTimer(time.Second * 5) + select { + case <-timer.C: + case <-ctx.Done(): + break forLoop + } + } + +} diff --git a/agent/pkg/process-manager/mrunners/runner_once.go b/agent/pkg/process-manager/mrunners/runner_once.go new file mode 100644 index 0000000..e03c259 --- /dev/null +++ b/agent/pkg/process-manager/mrunners/runner_once.go @@ -0,0 +1,32 @@ +package mrunners + +import ( + "context" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/munit" +) + +func init() { + Register(munit.KindOnce, func(opts RunnerOptions) (runner Runner, err error) { + if err = opts.Unit.RequireCommand(); err != nil { + return + } + + runner.Order = 20 + runner.Action = &runnerOnce{RunnerOptions: opts} + return + }) +} + +type runnerOnce struct { + RunnerOptions +} + +func (r *runnerOnce) Do(ctx context.Context) { + r.Print("controller started") + defer r.Print("controller exited") + + if err := r.Exec.Execute(r.Unit.ExecuteOptions(r.Logger)); err != nil { + r.Error("failed executing: " + err.Error()) + return + } +} diff --git a/agent/pkg/process-manager/mrunners/runner_render.go b/agent/pkg/process-manager/mrunners/runner_render.go new file mode 100644 index 0000000..86b2443 --- /dev/null +++ b/agent/pkg/process-manager/mrunners/runner_render.go @@ -0,0 +1,92 @@ +package mrunners + +import ( + "bytes" + "context" + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/menv" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mtmpl" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/munit" + "os" + "path/filepath" +) + +func init() { + Register(munit.KindRender, func(opts RunnerOptions) (runner Runner, err error) { + if err = opts.Unit.RequireFiles(); err != nil { + return + } + + runner.Order = 10 + runner.Action = &runnerRender{RunnerOptions: opts} + return + }) +} + +type runnerRender struct { + RunnerOptions +} + +func (r *runnerRender) doFile(ctx context.Context, name string, env map[string]string) (err error) { + var buf []byte + if buf, err = os.ReadFile(name); err != nil { + err = fmt.Errorf("failed reading %s: %s", name, err.Error()) + return + } + var content []byte + if content, err = mtmpl.Execute(string(buf), map[string]any{ + "Env": env, + }); err != nil { + err = fmt.Errorf("failed rendering %s: %s", name, err.Error()) + return + } + if !r.Unit.Raw { + content = sanitizeLines(content) + } + if err = os.WriteFile(name, content, 0755); err != nil { + err = fmt.Errorf("failed writing %s: %s", name, err.Error()) + return + } + return +} + +func (r *runnerRender) Do(ctx context.Context) { + r.Print("controller started") + defer r.Print("controller exited") + + 
env, err := menv.Construct(r.Unit.Env) + + if err != nil { + r.Error("failed constructing environments variables: " + err.Error()) + return + } + + for _, filePattern := range r.Unit.Files { + var names []string + if names, err = filepath.Glob(filePattern); err != nil { + r.Error(fmt.Sprintf("failed globbing: %s: %s", filePattern, err.Error())) + continue + } + for _, name := range names { + if err = r.doFile(ctx, name, env); err == nil { + r.Print("done rendering: " + name) + } else { + r.Error("failed rendering: " + name + ": " + err.Error()) + } + } + } +} + +func sanitizeLines(s []byte) []byte { + lines := bytes.Split(s, []byte("\n")) + out := &bytes.Buffer{} + for _, line := range lines { + line = bytes.TrimSpace(line) + if len(line) == 0 { + continue + } + out.Write(line) + out.WriteRune('\n') + } + return out.Bytes() +} diff --git a/agent/pkg/process-manager/msetups/setup.go b/agent/pkg/process-manager/msetups/setup.go new file mode 100644 index 0000000..f137b88 --- /dev/null +++ b/agent/pkg/process-manager/msetups/setup.go @@ -0,0 +1,43 @@ +package msetups + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mlog" + "sort" + "sync" +) + +type SetupFunc = func(log mlog.ProcLogger) error + +type setupItem struct { + order int + fn SetupFunc +} + +var ( + setupsLock sync.Locker = &sync.Mutex{} + setups []setupItem +) + +func Register(order int, fn SetupFunc) { + setupsLock.Lock() + defer setupsLock.Unlock() + + setups = append(setups, setupItem{order: order, fn: fn}) +} + +func Setup(logger mlog.ProcLogger) (err error) { + setupsLock.Lock() + defer setupsLock.Unlock() + + sort.Slice(setups, func(i, j int) bool { + return setups[i].order > setups[j].order + }) + + for _, setup := range setups { + if err = setup.fn(logger); err != nil { + return + } + } + + return +} diff --git a/agent/pkg/process-manager/msetups/setup_banner.go b/agent/pkg/process-manager/msetups/setup_banner.go new file mode 100644 index 0000000..b05e949 --- /dev/null +++ b/agent/pkg/process-manager/msetups/setup_banner.go @@ -0,0 +1,30 @@ +package msetups + +import ( + "bytes" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mlog" + "os" +) + +const ( + BannerFile = "/etc/banner.minit.txt" +) + +func init() { + Register(10, setupBanner) +} + +func setupBanner(logger mlog.ProcLogger) (err error) { + var buf []byte + if buf, err = os.ReadFile(BannerFile); err != nil { + err = nil + return + } + + lines := bytes.Split(buf, []byte{'\n'}) + for _, line := range lines { + logger.Print(string(line)) + } + + return +} diff --git a/agent/pkg/process-manager/msetups/setup_rlimits.go b/agent/pkg/process-manager/msetups/setup_rlimits.go new file mode 100644 index 0000000..9515647 --- /dev/null +++ b/agent/pkg/process-manager/msetups/setup_rlimits.go @@ -0,0 +1,104 @@ +//go:build linux + +package msetups + +import ( + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mlog" + "golang.org/x/sys/unix" + "os" + "strconv" + "strings" + "syscall" +) + +const Unlimited = "unlimited" + +var ( + knownRLimitNames = map[string]int{ + "AS": unix.RLIMIT_AS, + "CORE": unix.RLIMIT_CORE, + "CPU": unix.RLIMIT_CPU, + "DATA": unix.RLIMIT_DATA, + "FSIZE": unix.RLIMIT_FSIZE, + "LOCKS": unix.RLIMIT_LOCKS, + "MEMLOCK": unix.RLIMIT_MEMLOCK, + "MSGQUEUE": unix.RLIMIT_MSGQUEUE, + "NICE": unix.RLIMIT_NICE, + "NOFILE": unix.RLIMIT_NOFILE, + "NPROC": unix.RLIMIT_NPROC, + "RTPRIO": unix.RLIMIT_RTPRIO, + "SIGPENDING": unix.RLIMIT_SIGPENDING, + "STACK": unix.RLIMIT_STACK, + } +) + +func 
decodeRLimitValue(v *uint64, s string) (err error) { + s = strings.TrimSpace(s) + if s == "-" || s == "" { + return + } + if strings.ToLower(s) == Unlimited { + *v = unix.RLIM_INFINITY + } else { + if *v, err = strconv.ParseUint(s, 10, 64); err != nil { + return + } + } + return +} + +func formatRLimitValue(v uint64) string { + if v == unix.RLIM_INFINITY { + return Unlimited + } else { + return strconv.FormatUint(v, 10) + } +} + +func init() { + Register(30, setupRLimits) +} + +func setupRLimits(logger mlog.ProcLogger) (err error) { + for name, res := range knownRLimitNames { + key := "MINIT_RLIMIT_" + name + val := strings.TrimSpace(os.Getenv(key)) + if val == "-" || val == "-:-" || val == "" { + continue + } + var limit syscall.Rlimit + if err = syscall.Getrlimit(res, &limit); err != nil { + err = fmt.Errorf("failed getting rlimit_%s: %s", name, err.Error()) + return + } + logger.Printf("current rlimit_%s=%s:%s", name, formatRLimitValue(limit.Cur), formatRLimitValue(limit.Max)) + if strings.Contains(val, ":") { + splits := strings.Split(val, ":") + if len(splits) != 2 { + err = fmt.Errorf("invalid environment variable %s=%s", key, val) + return + } + if err = decodeRLimitValue(&limit.Cur, splits[0]); err != nil { + err = fmt.Errorf("invalid environment variable %s=%s: %s", key, val, err.Error()) + return + } + if err = decodeRLimitValue(&limit.Max, splits[1]); err != nil { + err = fmt.Errorf("invalid environment variable %s=%s: %s", key, val, err.Error()) + return + } + } else { + if err = decodeRLimitValue(&limit.Cur, val); err != nil { + return + } + limit.Max = limit.Cur + } + logger.Printf("setting rlimit_%s=%s:%s", name, formatRLimitValue(limit.Cur), formatRLimitValue(limit.Max)) + if err = syscall.Setrlimit(res, &limit); err != nil { + err = fmt.Errorf("failed setting rlimit_%s=%s: %s", name, val, err.Error()) + return + } + } + + return +} diff --git a/agent/pkg/process-manager/msetups/setup_sysctl.go b/agent/pkg/process-manager/msetups/setup_sysctl.go new file mode 100644 index 0000000..bae4e0b --- /dev/null +++ b/agent/pkg/process-manager/msetups/setup_sysctl.go @@ -0,0 +1,45 @@ +//go:build linux + +package msetups + +import ( + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mlog" + "os" + "path/filepath" + "strings" +) + +func init() { + Register(20, setupSysctl) +} + +func setupSysctl(logger mlog.ProcLogger) (err error) { + items := strings.Split(os.Getenv("MINIT_SYSCTL"), ",") + for _, item := range items { + splits := strings.SplitN(item, "=", 2) + if len(splits) != 2 { + continue + } + + k, v := strings.TrimSpace(splits[0]), strings.TrimSpace(splits[1]) + if k == "" { + continue + } + + filename := filepath.Join( + append( + []string{"/proc", "sys"}, + strings.Split(k, ".")..., + )..., + ) + + logger.Printf("writing sysctl %s=%s", k, v) + + if err = os.WriteFile(filename, []byte(v), 0644); err != nil { + err = fmt.Errorf("failed writing sysctl %s=%s: %s", k, v, err.Error()) + return + } + } + return +} diff --git a/agent/pkg/process-manager/msetups/setup_test.go b/agent/pkg/process-manager/msetups/setup_test.go new file mode 100644 index 0000000..ef6b75d --- /dev/null +++ b/agent/pkg/process-manager/msetups/setup_test.go @@ -0,0 +1,10 @@ +package msetups + +import ( + "github.com/stretchr/testify/require" + "testing" +) + +func TestSetup(t *testing.T) { + require.Equal(t, 10, setups[0].order) +} diff --git a/agent/pkg/process-manager/msetups/setup_thp.go b/agent/pkg/process-manager/msetups/setup_thp.go new file mode 100644 index 0000000..2e5f681 
--- /dev/null
+++ b/agent/pkg/process-manager/msetups/setup_thp.go
@@ -0,0 +1,43 @@
+//go:build linux
+
+package msetups
+
+import (
+	"bytes"
+	"fmt"
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mlog"
+	"os"
+	"strings"
+)
+
+const (
+	controlFileTHP = "/sys/kernel/mm/transparent_hugepage/enabled"
+)
+
+func init() {
+	Register(40, setupTHP)
+}
+
+func setupTHP(logger mlog.ProcLogger) (err error) {
+	val := strings.TrimSpace(os.Getenv("MINIT_THP"))
+	if val == "" {
+		return
+	}
+	var buf []byte
+	if buf, err = os.ReadFile(controlFileTHP); err != nil {
+		err = fmt.Errorf("failed reading THP configuration %s: %s", controlFileTHP, err.Error())
+		return
+	}
+	logger.Printf("current THP configuration: %s", bytes.TrimSpace(buf))
+	logger.Printf("writing THP configuration: %s", val)
+	if err = os.WriteFile(controlFileTHP, []byte(val), 0644); err != nil {
+		err = fmt.Errorf("failed writing THP configuration %s: %s", controlFileTHP, err.Error())
+		return
+	}
+	if buf, err = os.ReadFile(controlFileTHP); err != nil {
+		err = fmt.Errorf("failed reading THP configuration %s: %s", controlFileTHP, err.Error())
+		return
+	}
+	logger.Printf("current THP configuration: %s", bytes.TrimSpace(buf))
+	return
+}
diff --git a/agent/pkg/process-manager/msetups/setup_webdav.go b/agent/pkg/process-manager/msetups/setup_webdav.go
new file mode 100644
index 0000000..7ef905d
--- /dev/null
+++ b/agent/pkg/process-manager/msetups/setup_webdav.go
@@ -0,0 +1,66 @@
+package msetups
+
+import (
+	"fmt"
+	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mlog"
+	"golang.org/x/net/webdav"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+)
+
+func init() {
+	Register(50, setupWebDAV)
+}
+
+func setupWebDAV(logger mlog.ProcLogger) (err error) {
+	envRoot := strings.TrimSpace(os.Getenv("MINIT_WEBDAV_ROOT"))
+	if envRoot == "" {
+		return
+	}
+	if err = os.MkdirAll(envRoot, 0755); err != nil {
+		err = fmt.Errorf("failed initializing WebDAV root: %s: %s", envRoot, err.Error())
+		return
+	}
+	envPort := strings.TrimSpace(os.Getenv("MINIT_WEBDAV_PORT"))
+	if envPort == "" {
+		envPort = "7486"
+	}
+	logger.Printf("WebDAV started: root=%s, port=%s", envRoot, envPort)
+	h := &webdav.Handler{
+		FileSystem: webdav.Dir(envRoot),
+		LockSystem: webdav.NewMemLS(),
+		Logger: func(req *http.Request, err error) {
+			if err != nil {
+				logger.Printf("WebDAV: %s %s: %s", req.Method, req.URL.Path, err.Error())
+			} else {
+				logger.Printf("WebDAV: %s %s", req.Method, req.URL.Path)
+			}
+		},
+	}
+	envUsername := strings.TrimSpace(os.Getenv("MINIT_WEBDAV_USERNAME"))
+	envPassword := strings.TrimSpace(os.Getenv("MINIT_WEBDAV_PASSWORD"))
+	s := http.Server{
+		Addr: ":" + envPort,
+		Handler: http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+			if envUsername != "" && envPassword != "" {
+				if username, password, ok := req.BasicAuth(); !ok || username != envUsername || password != envPassword {
+					rw.Header().Add("WWW-Authenticate", `Basic realm=Minit WebDAV`)
+					rw.WriteHeader(http.StatusUnauthorized)
+					return
+				}
+			}
+			h.ServeHTTP(rw, req)
+		}),
+	}
+	go func() {
+		for {
+			if err := s.ListenAndServe(); err != nil {
+				logger.Printf("failed running WebDAV: %s", err.Error())
+			}
+			time.Sleep(time.Second * 10)
+		}
+	}()
+	return
+}
diff --git a/agent/pkg/process-manager/msetups/setup_zombies.go b/agent/pkg/process-manager/msetups/setup_zombies.go
new file mode 100644
index 0000000..afc1ce1
--- /dev/null
+++ b/agent/pkg/process-manager/msetups/setup_zombies.go
@@ -0,0 +1,150 @@
+//go:build linux
+
+package msetups
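+
+// This file wires up zombie-process reaping. Rather than requiring minit to run as PID 1, the
+// reaper is enabled explicitly via config.Config.ZombieCleaner; when enabled, a background
+// goroutine reacts to SIGCHLD and a periodic ticker, scans /proc for processes in state 'Z',
+// and reaps each one with wait4(2).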
+ +import ( + "bytes" + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mlog" + "io/ioutil" + "os" + "os/signal" + "strconv" + "syscall" + "time" +) + +func init() { + Register(60, setupZombies) +} + +func setupZombies(log mlog.ProcLogger) (err error) { + // 如果自己不是 PID 1,则不负责清理僵尸进程 + //if os.Getpid() != 1 { + // log.Print("minit is not running as PID 1, skipping cleaning up zombies") + // return + //} + + if !config.Config.ZombieCleaner { + log.Print("minit is not running as PID 1, skipping cleaning up zombies") + return + } + + log.Print("开启ZombieCleaner") + go runZombieCleaner(log) + + return +} + +func runZombieCleaner(log mlog.ProcLogger) { + // SIGCHLD 触发 + chSig := make(chan os.Signal, 10) + signal.Notify(chSig, syscall.SIGCHLD) + + // 周期触发 + tk := time.NewTicker(time.Second * 30) + + var chT <-chan time.Time + + for { + select { + case <-chSig: + if chT == nil { + chT = time.After(time.Second * 3) + } + case <-tk.C: + if chT == nil { + chT = time.After(time.Second * 5) + } + case <-chT: + chT = nil + cleanZombieProcesses(log) + } + } +} + +func cleanZombieProcesses(log mlog.ProcLogger) { + var ( + err error + pids []int + ) + if pids, err = findZombieProcesses(); err != nil { + log.Print("failed checking zombies:", err.Error()) + return + } + + for _, pid := range pids { + go waitZombieProcess(log, pid) + } +} + +func findZombieProcesses() (pids []int, err error) { + var f *os.File + if f, err = os.Open("/proc"); err != nil { + return + } + defer f.Close() + var dirnames []string + if dirnames, err = f.Readdirnames(-1); err != nil { + return + } + for _, dirname := range dirnames { + if dirname[0] < '0' || dirname[0] > '9' { + continue + } + var pid int + if pid, err = strconv.Atoi(dirname); err != nil { + return + } + var zombie bool + if zombie, err = checkProcessIsZombie(pid); err != nil { + err = nil + continue + } + if zombie { + pids = append(pids, pid) + } + } + return +} + +func checkProcessIsZombie(pid int) (zombie bool, err error) { + var buf []byte + if buf, err = ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", pid)); err != nil { + return + } + zombie = checkProcStatIsZombie(buf) + return +} + +func checkProcStatIsZombie(buf []byte) bool { + if len(buf) == 0 { + return false + } + idx := bytes.LastIndexByte(buf, ')') + if idx < 0 { + return false + } + buf = buf[idx+1:] + buf = bytes.TrimSpace(buf) + if len(buf) == 0 { + return false + } + return buf[0] == 'Z' +} + +func waitZombieProcess(log mlog.ProcLogger, pid int) { + var err error + var ws syscall.WaitStatus + for { + _, err = syscall.Wait4(pid, &ws, 0, nil) + for syscall.EINTR == err { + _, err = syscall.Wait4(pid, &ws, 0, nil) + } + if syscall.ECHILD == err { + break + } + } + log.Printf("zombie cleaned %d", pid) +} diff --git a/agent/pkg/process-manager/msetups/setup_zombies_test.go b/agent/pkg/process-manager/msetups/setup_zombies_test.go new file mode 100644 index 0000000..d1649ab --- /dev/null +++ b/agent/pkg/process-manager/msetups/setup_zombies_test.go @@ -0,0 +1,16 @@ +//go:build linux + +package msetups + +import ( + "github.com/stretchr/testify/require" + "testing" +) + +func TestCheckProcStatIsZombie(t *testing.T) { + var res bool + res = checkProcStatIsZombie([]byte("299923 (kworker/2:1-cgroup_pidlist_destroy) R 2 0 0 0 -1 69238880 0 0 0 0 9 153 0 0 20 0 1 0 78232531 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 0 0 0 17 2 0 0 0 0 0 0 0 0 0 0 0 0 0")) + require.False(t, res) + res = 
checkProcStatIsZombie([]byte("299923 (kworker/2:1-cgroup_pidlist_destroy) Z 2 0 0 0 -1 69238880 0 0 0 0 9 153 0 0 20 0 1 0 78232531 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 0 0 0 17 2 0 0 0 0 0 0 0 0 0 0 0 0 0")) + require.True(t, res) +} diff --git a/agent/pkg/process-manager/mtmpl/execute.go b/agent/pkg/process-manager/mtmpl/execute.go new file mode 100644 index 0000000..e5c1797 --- /dev/null +++ b/agent/pkg/process-manager/mtmpl/execute.go @@ -0,0 +1,24 @@ +package mtmpl + +import ( + "bytes" + "text/template" +) + +// Execute render text template with predefined funcs +func Execute(src string, data any) (out []byte, err error) { + var t *template.Template + if t, err = template. + New("__main__"). + Funcs(Funcs). + Option("missingkey=zero"). + Parse(src); err != nil { + return + } + o := &bytes.Buffer{} + if err = t.Execute(o, data); err != nil { + return + } + out = o.Bytes() + return +} diff --git a/agent/pkg/process-manager/mtmpl/execute_test.go b/agent/pkg/process-manager/mtmpl/execute_test.go new file mode 100644 index 0000000..396f821 --- /dev/null +++ b/agent/pkg/process-manager/mtmpl/execute_test.go @@ -0,0 +1,13 @@ +package mtmpl + +import ( + "github.com/stretchr/testify/require" + "strings" + "testing" +) + +func TestExecute(t *testing.T) { + buf, err := Execute(TEST_TMPL, map[string]interface{}{"A": "B"}) + require.NoError(t, err) + require.Equal(t, "2\nB", strings.TrimSpace(string(buf))) +} diff --git a/agent/pkg/process-manager/mtmpl/funcs.go b/agent/pkg/process-manager/mtmpl/funcs.go new file mode 100644 index 0000000..8ea8ae4 --- /dev/null +++ b/agent/pkg/process-manager/mtmpl/funcs.go @@ -0,0 +1,140 @@ +package mtmpl + +import ( + "errors" + "net" + "os" + "os/user" + "strconv" + "strings" +) + +// Funcs provided funcs for render +var Funcs = map[string]interface{}{ + "netResolveIPAddr": net.ResolveIPAddr, + "netResolveIP": netResolveIP, + "osHostname": os.Hostname, + "osUserCacheDir": os.UserCacheDir, + "osUserConfigDir": os.UserConfigDir, + "osUserHomeDir": os.UserHomeDir, + "osGetegid": os.Getegid, + "osGetenv": os.Getenv, + "osGeteuid": os.Geteuid, + "osGetgid": os.Getgid, + "osGetgroups": os.Getgroups, + "osGetpagesize": os.Getpagesize, + "osGetpid": os.Getpid, + "osGetppid": os.Getppid, + "osGetuid": os.Getuid, + "osGetwd": os.Getwd, + "osTempDir": os.TempDir, + "osUserLookupGroup": user.LookupGroup, + "osUserLookupGroupId": user.LookupGroupId, + "osUserCurrent": user.Current, + "osUserLookup": user.Lookup, + "osUserLookupId": user.LookupId, + "stringsContains": strings.Contains, + "stringsFields": strings.Fields, + "stringsIndex": strings.Index, + "stringsLastIndex": strings.LastIndex, + "stringsHasPrefix": strings.HasPrefix, + "stringsHasSuffix": strings.HasSuffix, + "stringsRepeat": strings.Repeat, + "stringsReplaceAll": strings.ReplaceAll, + "stringsSplit": strings.Split, + "stringsSplitN": strings.SplitN, + "stringsToLower": strings.ToLower, + "stringsToUpper": strings.ToUpper, + "stringsTrimPrefix": strings.TrimPrefix, + "stringsTrimSpace": strings.TrimSpace, + "stringsTrimSuffix": strings.TrimSuffix, + "strconvQuote": strconv.Quote, + "strconvUnquote": strconv.Unquote, + "strconvParseBool": strconv.ParseBool, + "strconvParseInt": strconv.ParseInt, + "strconvParseUint": strconv.ParseUint, + "strconvParseFloat": strconv.ParseFloat, + "strconvFormatBool": strconv.FormatBool, + "strconvFormatInt": strconv.FormatInt, + "strconvFormatUint": strconv.FormatUint, + "strconvFormatFloat": strconv.FormatFloat, + "strconvAoti": strconv.Atoi, + "strconvItoa": 
strconv.Itoa, + + "add": add, + "neg": neg, + "intAdd": add, + "intNeg": neg, + "int64Add": add, + "int64Neg": neg, + "float32Add": add, + "float32Neg": neg, + "float64Add": add, + "float64Neg": neg, + + "osHostnameSequenceID": osHostnameSequenceID, + "k8sStatefulSetID": osHostnameSequenceID, +} + +func netResolveIP(s string) (ip string, err error) { + var addr *net.IPAddr + if addr, err = net.ResolveIPAddr("ip", s); err != nil { + return + } + ip = addr.IP.String() + return +} + +func add(a, b interface{}) interface{} { + switch a.(type) { + case bool: + return a.(bool) || b.(bool) + case int: + return a.(int) + b.(int) + case int64: + return a.(int64) + b.(int64) + case int32: + return a.(int32) + b.(int32) + case float32: + return a.(float32) + b.(float32) + case float64: + return a.(float64) + b.(float64) + case string: + return a.(string) + b.(string) + } + return nil +} + +func neg(a interface{}) interface{} { + switch a.(type) { + case bool: + return !a.(bool) + case int: + return -a.(int) + case int64: + return -a.(int64) + case int32: + return -a.(int32) + case float32: + return -a.(float32) + case float64: + return -a.(float64) + } + return nil +} + +func osHostnameSequenceID() (id int, err error) { + var hostname string + if hostname = os.Getenv("HOSTNAME"); hostname == "" { + if hostname, err = os.Hostname(); err != nil { + return + } + } + splits := strings.Split(hostname, "-") + if len(splits) < 2 { + err = errors.New("invalid stateful-set hostname") + return + } + id, err = strconv.Atoi(splits[len(splits)-1]) + return +} diff --git a/agent/pkg/process-manager/mtmpl/funcs_test.go b/agent/pkg/process-manager/mtmpl/funcs_test.go new file mode 100644 index 0000000..6c107ba --- /dev/null +++ b/agent/pkg/process-manager/mtmpl/funcs_test.go @@ -0,0 +1,26 @@ +package mtmpl + +import ( + "bytes" + "github.com/stretchr/testify/require" + "strings" + "testing" + "text/template" +) + +const TEST_TMPL = ` +{{$a := 3}} +{{$b := 1}} +{{add (neg $b) $a}} +{{.A}} +` + +func TestFuncs(t *testing.T) { + tmpl := template.New("__main__").Funcs(Funcs).Option("missingkey=zero") + tmpl, err := tmpl.Parse(TEST_TMPL) + require.NoError(t, err) + buf := &bytes.Buffer{} + err = tmpl.Execute(buf, map[string]interface{}{"A": "B"}) + require.NoError(t, err) + require.Equal(t, "2\nB", strings.TrimSpace(buf.String())) +} diff --git a/agent/pkg/process-manager/munit/filter.go b/agent/pkg/process-manager/munit/filter.go new file mode 100644 index 0000000..784094d --- /dev/null +++ b/agent/pkg/process-manager/munit/filter.go @@ -0,0 +1,62 @@ +package munit + +import "strings" + +type FilterMap map[string]struct{} + +func (fm FilterMap) Match(unit Unit) bool { + if fm == nil { + return false + } + if _, ok := fm[unit.Name]; ok { + return true + } + if _, ok := fm[PrefixGroup+unit.Group]; ok { + return true + } + if _, ok := fm[PrefixKind+unit.Kind]; ok { + return true + } + return false +} + +func NewFilterMap(s string) (out FilterMap) { + s = strings.TrimSpace(s) + for _, item := range strings.Split(s, ",") { + item = strings.TrimSpace(item) + if item == "" || item == PrefixGroup || item == PrefixKind { + continue + } + if out == nil { + out = FilterMap{} + } + out[item] = struct{}{} + } + return +} + +type Filter struct { + pass FilterMap + deny FilterMap +} + +func NewFilter(pass, deny string) (uf *Filter) { + return &Filter{ + pass: NewFilterMap(pass), + deny: NewFilterMap(deny), + } +} + +func (uf *Filter) Match(unit Unit) bool { + if uf.pass != nil { + if !uf.pass.Match(unit) { + return false + } + } + if 
uf.deny != nil { + if uf.deny.Match(unit) { + return false + } + } + return true +} diff --git a/agent/pkg/process-manager/munit/filter_test.go b/agent/pkg/process-manager/munit/filter_test.go new file mode 100644 index 0000000..2656ded --- /dev/null +++ b/agent/pkg/process-manager/munit/filter_test.go @@ -0,0 +1,117 @@ +package munit + +import ( + "crypto/rand" + "encoding/hex" + "github.com/stretchr/testify/require" + "testing" +) + +func TestNewFilterMap(t *testing.T) { + fm := NewFilterMap("") + require.Nil(t, fm) + + fm = NewFilterMap(",, ,") + require.Nil(t, fm) + + fm = NewFilterMap("unit-a,&daemon") + require.True(t, fm.Match(Unit{ + Name: "unit-a", + })) + require.True(t, fm.Match(Unit{ + Name: "unit-b", + Kind: "daemon", + })) + + fm = NewFilterMap("unit-a, ,, @group-b, unit-c,,") + require.NotNil(t, fm) + require.True(t, fm.Match(Unit{ + Name: "unit-a", + })) + require.True(t, fm.Match(Unit{ + Name: "unit-b", + Group: "group-b", + })) + require.True(t, fm.Match(Unit{ + Name: "unit-c", + Group: "group-c", + })) + require.False(t, fm.Match(Unit{ + Name: "unit-d", + Group: "group-d", + })) +} + +func TestNewFilter(t *testing.T) { + f := NewFilter(" , , , ", ",, ,") + for i := 0; i < 10; i++ { + buf := make([]byte, 10) + rand.Read(buf) + require.True(t, f.Match(Unit{ + Name: hex.EncodeToString(buf), + Group: hex.EncodeToString(buf), + })) + } + + f = NewFilter("unit-a,&daemon", "") + require.False(t, f.Match(Unit{ + Name: "bla", + Kind: KindCron, + })) + require.True(t, f.Match(Unit{ + Name: "bla", + Kind: KindDaemon, + })) + require.True(t, f.Match(Unit{ + Name: "unit-a", + Kind: KindCron, + })) + + f = NewFilter("", "unit-a,&daemon") + require.True(t, f.Match(Unit{ + Name: "bla", + Kind: KindCron, + })) + require.False(t, f.Match(Unit{ + Name: "bla", + Kind: KindDaemon, + })) + require.False(t, f.Match(Unit{ + Name: "unit-a", + Kind: KindCron, + })) + + f = NewFilter("", "unit-a,,,@group-c,,") + require.True(t, f.Match(Unit{ + Name: "unit-b", + Group: "group-b", + })) + require.False(t, f.Match(Unit{ + Name: "unit-c", + Group: "group-c", + })) + + f = NewFilter("unit-a,,,@group-c,,", "") + require.False(t, f.Match(Unit{ + Name: "unit-b", + Group: "group-b", + })) + require.True(t, f.Match(Unit{ + Name: "unit-c", + Group: "group-c", + })) + + f = NewFilter("unit-a,,,@group-c,,", "unit-c2") + require.False(t, f.Match(Unit{ + Name: "unit-b", + Group: "group-b", + })) + require.True(t, f.Match(Unit{ + Name: "unit-c", + Group: "group-c", + })) + require.False(t, f.Match(Unit{ + Name: "unit-c2", + Group: "group-c", + })) +} diff --git a/agent/pkg/process-manager/munit/load.go b/agent/pkg/process-manager/munit/load.go new file mode 100644 index 0000000..ac81a97 --- /dev/null +++ b/agent/pkg/process-manager/munit/load.go @@ -0,0 +1,160 @@ +package munit + +import ( + "errors" + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/shellquote" + "gopkg.in/yaml.v3" + "io" + "os" + "path/filepath" + "strconv" + "strings" +) + +func LoadArgs(args []string) (unit Unit, ok bool, err error) { + var opts []string + + // fix a history issue + for len(args) > 0 { + if filepath.Base(args[0]) == "minit" { + args = args[1:] + } else { + break + } + } + + // extract arguments after '--' if existed + for i, item := range args { + if item == "--" { + opts = args[0:i] + args = args[i+1:] + break + } + } + + if len(args) == 0 { + return + } + + unit = Unit{ + Name: "arg-main", + Kind: KindDaemon, + Command: args, + } + + // opts decoding + for _, opt := range opts { + if 
strings.HasSuffix(opt, "-"+KindOnce) { + unit.Kind = KindOnce + } + } + + ok = true + + return +} + +func LoadEnv() (unit Unit, ok bool, err error) { + cmd := strings.TrimSpace(os.Getenv("MINIT_MAIN")) + if cmd == "" { + return + } + + name := strings.TrimSpace(os.Getenv("MINIT_MAIN_NAME")) + if name == "" { + name = "env-main" + } + + var ( + cron string + immediate bool + ) + + kind := strings.TrimSpace(os.Getenv("MINIT_MAIN_KIND")) + + switch kind { + case KindDaemon, KindOnce: + case KindCron: + cron = strings.TrimSpace(os.Getenv("MINIT_MAIN_CRON")) + + if cron == "" { + err = errors.New("missing environment variable $MINIT_MAIN_CRON while $MINIT_MAIN_KIND is 'cron'") + return + } + + immediate, _ = strconv.ParseBool(os.Getenv("MINIT_MAIN_IMMEDIATE")) + case "": + if once, _ := strconv.ParseBool(strings.TrimSpace(os.Getenv("MINIT_MAIN_ONCE"))); once { + kind = KindOnce + } else { + kind = KindDaemon + } + default: + err = errors.New("unsupported $MINIT_MAIN_KIND: " + kind) + return + } + + var cmds []string + if cmds, err = shellquote.Split(cmd); err != nil { + return + } + + unit = Unit{ + Name: name, + Group: strings.TrimSpace(os.Getenv("MINIT_MAIN_GROUP")), + Kind: kind, + Cron: cron, + Immediate: immediate, + Command: cmds, + Dir: strings.TrimSpace(os.Getenv("MINIT_MAIN_DIR")), + Charset: strings.TrimSpace(os.Getenv("MINIT_MAIN_CHARSET")), + } + + ok = true + return +} + +func LoadFile(filename string) (units []Unit, err error) { + var f *os.File + if f, err = os.Open(filename); err != nil { + return + } + defer f.Close() + + dec := yaml.NewDecoder(f) + for { + var unit Unit + if err = dec.Decode(&unit); err != nil { + if err == io.EOF { + err = nil + } else { + err = fmt.Errorf("failed to decode unit file %s: %s", filename, err.Error()) + } + return + } + + if unit.Kind == "" { + continue + } + + units = append(units, unit) + } +} + +func LoadDir(dir string) (units []Unit, err error) { + for _, ext := range []string{"*.yml", "*.yaml"} { + var files []string + if files, err = filepath.Glob(filepath.Join(dir, ext)); err != nil { + return + } + for _, file := range files { + var _units []Unit + if _units, err = LoadFile(file); err != nil { + return + } + units = append(units, _units...) 
+ } + } + return +} diff --git a/agent/pkg/process-manager/munit/load_test.go b/agent/pkg/process-manager/munit/load_test.go new file mode 100644 index 0000000..bdddd2c --- /dev/null +++ b/agent/pkg/process-manager/munit/load_test.go @@ -0,0 +1,105 @@ +package munit + +import ( + "github.com/stretchr/testify/require" + "os" + "testing" +) + +func TestLoadArgs(t *testing.T) { + unit, ok, err := LoadArgs([]string{ + "hello", + "world", + }) + require.NoError(t, err) + require.True(t, ok) + require.Equal(t, []string{"hello", "world"}, unit.Command) + + unit, ok, err = LoadArgs([]string{ + "minit", + "/usr/bin/minit", + "hello", + "world", + }) + require.NoError(t, err) + require.True(t, ok) + require.Equal(t, []string{"hello", "world"}, unit.Command) + + unit, ok, err = LoadArgs([]string{ + "minit", + "--a", + "--b", + "--", + "hello", + "world", + }) + require.NoError(t, err) + require.True(t, ok) + require.Equal(t, []string{"hello", "world"}, unit.Command) + + unit, ok, err = LoadArgs([]string{ + "minit", + "--a", + "--b", + "--", + }) + require.NoError(t, err) + require.False(t, ok) + + unit, ok, err = LoadArgs([]string{ + "--a", + "--b", + "--", + }) + require.NoError(t, err) + require.False(t, ok) + + unit, ok, err = LoadArgs([]string{ + "minit", + "--once", + "--b", + "--", + "sleep", + "30", + }) + require.NoError(t, err) + require.True(t, ok) + require.Equal(t, []string{"sleep", "30"}, unit.Command) + require.Equal(t, KindOnce, unit.Kind) + + unit, ok, err = LoadArgs([]string{ + "--once", + "--b", + "--", + "sleep", + "30", + }) + require.NoError(t, err) + require.True(t, ok) + require.Equal(t, []string{"sleep", "30"}, unit.Command) + require.Equal(t, KindOnce, unit.Kind) +} + +func TestLoadEnv(t *testing.T) { + os.Setenv("MINIT_MAIN", "hello 'world destroyer'") + os.Setenv("MINIT_MAIN_KIND", "cron") + os.Setenv("MINIT_MAIN_NAME", "test-main") + os.Setenv("MINIT_MAIN_CRON", "1 2 3 4 5") + os.Setenv("MINIT_MAIN_GROUP", "bbb") + os.Setenv("MINIT_MAIN_CHARSET", "gbk") + + unit, ok, err := LoadEnv() + require.NoError(t, err) + require.True(t, ok) + require.Equal(t, Unit{ + Kind: "cron", + Name: "test-main", + Cron: "1 2 3 4 5", + Group: "bbb", + Command: []string{ + "hello", + "world destroyer", + }, + Charset: "gbk", + }, unit) +} diff --git a/agent/pkg/process-manager/munit/loader.go b/agent/pkg/process-manager/munit/loader.go new file mode 100644 index 0000000..da9a98b --- /dev/null +++ b/agent/pkg/process-manager/munit/loader.go @@ -0,0 +1,123 @@ +package munit + +import ( + "errors" + "os" + "regexp" + "strconv" + "strings" +) + +var ( + regexpName = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9_-]*[a-zA-Z0-9]$`) +) + +const ( + NameMinit = "minit" + + PrefixGroup = "@" + PrefixKind = "&" +) + +type Loader struct { + filter *Filter +} + +func NewLoader() (ld *Loader) { + return &Loader{ + filter: NewFilter( + strings.TrimSpace(os.Getenv("MINIT_ENABLE")), + strings.TrimSpace(os.Getenv("MINIT_DISABLE")), + ), + } +} + +type LoadOptions struct { + Dir string +} + +func (ld *Loader) Load(opts LoadOptions) (output []Unit, skipped []Unit, err error) { + var units []Unit + + // load units + if opts.Dir != "" { + var dUnits []Unit + if dUnits, err = LoadDir(opts.Dir); err != nil { + return + } + units = append(units, dUnits...) 
+ } + + // check duplicated name + names := map[string]struct{}{} + + // reserve 'minit' + + names[NameMinit] = struct{}{} + + // whitelist / blacklist, replicas + for _, unit := range units { + // check unit kind + if _, ok := knownUnitKind[unit.Kind]; !ok { + err = errors.New("invalid unit kind: " + unit.Kind) + return + } + + // check unit name + if !regexpName.MatchString(unit.Name) { + err = errors.New("invalid unit name: " + unit.Name) + return + } + + // check duplicated + if _, found := names[unit.Name]; found { + err = errors.New("duplicated unit name: " + unit.Name) + return + } + names[unit.Name] = struct{}{} + + // fix default group + if unit.Group == "" { + unit.Group = DefaultGroup + } + + // skip if needed + if !ld.filter.Match(unit) { + skipped = append(skipped, unit) + continue + } + + // replicas + if unit.Count > 1 { + for i := 0; i < unit.Count; i++ { + subUnit := unit + subUnit.Name = unit.Name + "-" + strconv.Itoa(i+1) + subUnit.Count = 1 + dupOrMakeMap(&subUnit.Env) + subUnit.Env["MINIT_UNIT_NAME"] = subUnit.Name + subUnit.Env["MINIT_UNIT_SUB_ID"] = strconv.Itoa(i + 1) + + output = append(output, subUnit) + } + } else { + unit.Count = 1 + dupOrMakeMap(&unit.Env) + unit.Env["MINIT_UNIT_NAME"] = unit.Name + unit.Env["MINIT_UNIT_SUB_ID"] = "1" + + output = append(output, unit) + } + } + + return +} + +func dupOrMakeMap[T comparable, U any](m *map[T]U) { + nm := make(map[T]U) + if *m != nil { + for k, v := range *m { + nm[k] = v + } + } + *m = nm +} diff --git a/agent/pkg/process-manager/munit/loader_test.go b/agent/pkg/process-manager/munit/loader_test.go new file mode 100644 index 0000000..51985cc --- /dev/null +++ b/agent/pkg/process-manager/munit/loader_test.go @@ -0,0 +1,36 @@ +package munit + +import ( + "github.com/stretchr/testify/require" + "os" + "testing" +) + +func TestNewLoader(t *testing.T) { + os.Setenv("MINIT_ENABLE", "@default") + os.Setenv("MINIT_DISABLE", "task-3,task-5") + ld := NewLoader() + units, skipped, err := ld.Load(LoadOptions{ + Dir: "testdata", + }) + + require.NoError(t, err) + require.Len(t, units, 1) + require.Len(t, skipped, 4) + require.Equal(t, "task-4", units[0].Name) +} + +func TestDupOrMakeMap(t *testing.T) { + var o map[string]any + dupOrMakeMap(&o) + require.NotNil(t, o) + + m1a := map[string]string{ + "a": "b", + } + m1b := m1a + dupOrMakeMap(&m1a) + m1a["c"] = "d" + require.Equal(t, "d", m1a["c"]) + require.Equal(t, "", m1b["c"]) +} diff --git a/agent/pkg/process-manager/munit/testdata/test1.yml b/agent/pkg/process-manager/munit/testdata/test1.yml new file mode 100644 index 0000000..abb11cf --- /dev/null +++ b/agent/pkg/process-manager/munit/testdata/test1.yml @@ -0,0 +1,16 @@ +name: task-1 +kind: once +group: group-echo +command: + - echo + - once + - $HOME +--- +name: task-2 +kind: daemon +group: group-echo +count: 3 +shell: /bin/bash +command: + - sleep 1 && echo hello world +--- \ No newline at end of file diff --git a/agent/pkg/process-manager/munit/testdata/test2.yml b/agent/pkg/process-manager/munit/testdata/test2.yml new file mode 100644 index 0000000..5ee53a8 --- /dev/null +++ b/agent/pkg/process-manager/munit/testdata/test2.yml @@ -0,0 +1,18 @@ +name: task-3 +kind: daemon +count: 3 +command: + - sleep + - 5 +--- +name: task-4 +kind: cron +cron: "@every 10s" +command: + - echo + - cron +--- +name: task-5 +kind: render +files: + - testdata/conf/*.conf diff --git a/agent/pkg/process-manager/munit/unit.go b/agent/pkg/process-manager/munit/unit.go new file mode 100644 index 0000000..f155a85 --- /dev/null +++ 
b/agent/pkg/process-manager/munit/unit.go @@ -0,0 +1,85 @@ +package munit + +import ( + "errors" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mexec" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mlog" +) + +const ( + DefaultGroup = "default" +) + +const ( + KindDaemon = "daemon" + KindOnce = "once" + KindCron = "cron" + KindRender = "render" +) + +var ( + knownUnitKind = map[string]struct{}{ + KindDaemon: {}, + KindOnce: {}, + KindCron: {}, + KindRender: {}, + } +) + +type Unit struct { + Kind string `yaml:"kind"` // kind of unit + Name string `yaml:"name"` // name of unit + Group string `yaml:"group"` // group of unit + Count int `yaml:"count"` // replicas of unit + + // execution options + Dir string `yaml:"dir"` + Shell string `yaml:"shell"` + Env map[string]string `yaml:"env"` + Command []string `yaml:"command"` + Charset string `yaml:"charset"` + + // for 'render' only + Raw bool `yaml:"raw"` // don't trim white spaces for 'render' + Files []string `yaml:"files"` // files to process + + // for 'cron' only + Cron string `yaml:"cron"` // cron syntax + Immediate bool `yaml:"immediate"` +} + +func (u Unit) RequireCommand() error { + if len(u.Command) == 0 { + return errors.New("missing unit field: command") + } + return nil +} + +func (u Unit) RequireFiles() error { + if len(u.Files) == 0 { + return errors.New("missing unit field: command") + } + return nil +} + +func (u Unit) RequireCron() error { + if len(u.Cron) == 0 { + return errors.New("missing unit field: cron") + } + return nil +} + +func (u Unit) ExecuteOptions(logger mlog.ProcLogger) mexec.ExecuteOptions { + return mexec.ExecuteOptions{ + Name: u.Kind + "/" + u.Name, + + Dir: u.Dir, + Shell: u.Shell, + Env: u.Env, + Command: u.Command, + Charset: u.Charset, + + Logger: logger, + IgnoreExecError: true, + } +} diff --git a/agent/pkg/process-manager/shellquote/both_test.go b/agent/pkg/process-manager/shellquote/both_test.go new file mode 100644 index 0000000..9cba3c8 --- /dev/null +++ b/agent/pkg/process-manager/shellquote/both_test.go @@ -0,0 +1,29 @@ +package shellquote + +import ( + "reflect" + "testing" + "testing/quick" +) + +// this is called bothtest because it tests Split and Join together + +func TestJoinSplit(t *testing.T) { + f := func(strs []string) bool { + // Join, then split, the input + combined := Join(strs...) + split, err := Split(combined) + if err != nil { + t.Logf("Error splitting %#v: %v", combined, err) + return false + } + if !reflect.DeepEqual(strs, split) { + t.Logf("Input %q did not match output %q", strs, split) + return false + } + return true + } + if err := quick.Check(f, nil); err != nil { + t.Error(err) + } +} diff --git a/agent/pkg/process-manager/shellquote/doc.go b/agent/pkg/process-manager/shellquote/doc.go new file mode 100644 index 0000000..459c874 --- /dev/null +++ b/agent/pkg/process-manager/shellquote/doc.go @@ -0,0 +1,3 @@ +// Package shellquote provides utilities for joining/splitting strings using sh's +// word-splitting rules. +package shellquote diff --git a/agent/pkg/process-manager/shellquote/quote.go b/agent/pkg/process-manager/shellquote/quote.go new file mode 100644 index 0000000..84eb0a3 --- /dev/null +++ b/agent/pkg/process-manager/shellquote/quote.go @@ -0,0 +1,99 @@ +package shellquote + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// Join quotes each argument and joins them with a space. +// If passed to /bin/sh, the resulting string will be split back into the +// original arguments. 
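+// For example, Join("echo", "hello world") yields `echo 'hello world'`, which /bin/sh splits back into the original two arguments.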
+func Join(args ...string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} + +const ( + specialChars = "\\'\"`${[|&;<>()*?!~" + extraSpecialChars = " \t\n" +) + +func quote(word string, buf *bytes.Buffer) { + // We want to try to produce a "nice" output. As such, we will + // backslash-escape most characters, but if we encounter a space, or if we + // encounter an extra-special char (which doesn't work with + // backslash-escaping) we switch over to quoting the whole word. We do this + // with a space because it's typically easier for people to read multi-word + // arguments when quoted with a space rather than with ugly backslashes + // everywhere. + origLen := buf.Len() + + if len(word) == 0 { + // oops, no content + buf.WriteString("''") + return + } + + cur, prev := word, word + for len(cur) > 0 { + c, l := utf8.DecodeRuneInString(cur) + cur = cur[l:] + if strings.ContainsRune(specialChars, c) { + // copy the non-special chars up to this point + if len(cur) < len(prev) { + buf.WriteString(prev[0 : len(prev)-len(cur)-l]) + } + buf.WriteByte('\\') + buf.WriteRune(c) + prev = cur + } else if strings.ContainsRune(extraSpecialChars, c) { + // start over in quote mode + buf.Truncate(origLen) + goto quote + } + } + if len(prev) > 0 { + buf.WriteString(prev) + } + return + +quote: + // quote mode + // Use single-quotes, but if we find a single-quote in the word, we need + // to terminate the string, emit an escaped quote, and start the string up + // again + inQuote := false + for len(word) > 0 { + i := strings.IndexRune(word, '\'') + if i == -1 { + break + } + if i > 0 { + if !inQuote { + buf.WriteByte('\'') + inQuote = true + } + buf.WriteString(word[0:i]) + } + word = word[i+1:] + if inQuote { + buf.WriteByte('\'') + inQuote = false + } + buf.WriteString("\\'") + } + if len(word) > 0 { + if !inQuote { + buf.WriteByte('\'') + } + buf.WriteString(word) + buf.WriteByte('\'') + } +} diff --git a/agent/pkg/process-manager/shellquote/quote_test.go b/agent/pkg/process-manager/shellquote/quote_test.go new file mode 100644 index 0000000..bb29fd8 --- /dev/null +++ b/agent/pkg/process-manager/shellquote/quote_test.go @@ -0,0 +1,31 @@ +package shellquote + +import ( + "testing" +) + +func TestSimpleJoin(t *testing.T) { + for _, elem := range simpleJoinTest { + output := Join(elem.input...) 
+ if output != elem.output { + t.Errorf("Input %q, got %q, expected %q", elem.input, output, elem.output) + } + } +} + +var simpleJoinTest = []struct { + input []string + output string +}{ + {[]string{"test"}, "test"}, + {[]string{"hello goodbye"}, "'hello goodbye'"}, + {[]string{"hello", "goodbye"}, "hello goodbye"}, + {[]string{"don't you know the dewey decimal system?"}, "'don'\\''t you know the dewey decimal system?'"}, + {[]string{"don't", "you", "know", "the", "dewey", "decimal", "system?"}, "don\\'t you know the dewey decimal system\\?"}, + {[]string{"~user", "u~ser", " ~user", "!~user"}, "\\~user u\\~ser ' ~user' \\!\\~user"}, + {[]string{"foo*", "M{ovies,usic}", "ab[cd]", "%3"}, "foo\\* M\\{ovies,usic} ab\\[cd] %3"}, + {[]string{"one", "", "three"}, "one '' three"}, + {[]string{"some(parentheses)"}, "some\\(parentheses\\)"}, + {[]string{"$some_ot~her_)spe!cial_*_characters"}, "\\$some_ot\\~her_\\)spe\\!cial_\\*_characters"}, + {[]string{"' "}, "\\'' '"}, +} diff --git a/agent/pkg/process-manager/shellquote/unquote.go b/agent/pkg/process-manager/shellquote/unquote.go new file mode 100644 index 0000000..de0153f --- /dev/null +++ b/agent/pkg/process-manager/shellquote/unquote.go @@ -0,0 +1,156 @@ +package shellquote + +import ( + "bytes" + "errors" + "strings" + "unicode/utf8" +) + +var ( + UnterminatedSingleQuoteError = errors.New("unterminated single-quoted string") + UnterminatedDoubleQuoteError = errors.New("unterminated double-quoted string") + UnterminatedEscapeError = errors.New("unterminated backslash-escape") +) + +var ( + splitChars = " \n\t" + singleChar = '\'' + doubleChar = '"' + escapeChar = '\\' + doubleEscapeChars = "$`\"\n\\" +) + +// Split splits a string according to /bin/sh's word-splitting rules. It +// supports backslash-escapes, single-quotes, and double-quotes. Notably it does +// not support the $” style of quoting. It also doesn't attempt to perform any +// other sort of expansion, including brace expansion, shell expansion, or +// pathname expansion. +// +// If the given input has an unterminated quoted string or ends in a +// backslash-escape, one of UnterminatedSingleQuoteError, +// UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned. 
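+// For example, Split("hello 'big world'") returns []string{"hello", "big world"}.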
+func Split(input string) (words []string, err error) { + var buf bytes.Buffer + words = make([]string, 0) + + for len(input) > 0 { + // skip any splitChars at the start + c, l := utf8.DecodeRuneInString(input) + if strings.ContainsRune(splitChars, c) { + input = input[l:] + continue + } else if c == escapeChar { + // Look ahead for escaped newline so we can skip over it + next := input[l:] + if len(next) == 0 { + err = UnterminatedEscapeError + return + } + c2, l2 := utf8.DecodeRuneInString(next) + if c2 == '\n' { + input = next[l2:] + continue + } + } + + var word string + word, input, err = splitWord(input, &buf) + if err != nil { + return + } + words = append(words, word) + } + return +} + +func splitWord(input string, buf *bytes.Buffer) (word string, remainder string, err error) { + buf.Reset() + +raw: + { + cur := input + for len(cur) > 0 { + c, l := utf8.DecodeRuneInString(cur) + cur = cur[l:] + if c == singleChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto single + } else if c == doubleChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto double + } else if c == escapeChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto escape + } else if strings.ContainsRune(splitChars, c) { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + return buf.String(), cur, nil + } + } + if len(input) > 0 { + buf.WriteString(input) + input = "" + } + goto done + } + +escape: + { + if len(input) == 0 { + return "", "", UnterminatedEscapeError + } + c, l := utf8.DecodeRuneInString(input) + if c == '\n' { + // a backslash-escaped newline is elided from the output entirely + } else { + buf.WriteString(input[:l]) + } + input = input[l:] + } + goto raw + +single: + { + i := strings.IndexRune(input, singleChar) + if i == -1 { + return "", "", UnterminatedSingleQuoteError + } + buf.WriteString(input[0:i]) + input = input[i+1:] + goto raw + } + +double: + { + cur := input + for len(cur) > 0 { + c, l := utf8.DecodeRuneInString(cur) + cur = cur[l:] + if c == doubleChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto raw + } else if c == escapeChar { + // bash only supports certain escapes in double-quoted strings + c2, l2 := utf8.DecodeRuneInString(cur) + cur = cur[l2:] + if strings.ContainsRune(doubleEscapeChars, c2) { + buf.WriteString(input[0 : len(input)-len(cur)-l-l2]) + if c2 == '\n' { + // newline is special, skip the backslash entirely + } else { + buf.WriteRune(c2) + } + input = cur + } + } + } + return "", "", UnterminatedDoubleQuoteError + } + +done: + return buf.String(), input, nil +} diff --git a/agent/pkg/process-manager/shellquote/unquote_test.go b/agent/pkg/process-manager/shellquote/unquote_test.go new file mode 100644 index 0000000..19c10d9 --- /dev/null +++ b/agent/pkg/process-manager/shellquote/unquote_test.go @@ -0,0 +1,55 @@ +package shellquote + +import ( + "reflect" + "testing" +) + +func TestSimpleSplit(t *testing.T) { + for _, elem := range simpleSplitTest { + output, err := Split(elem.input) + if err != nil { + t.Errorf("Input %q, got error %#v", elem.input, err) + } else if !reflect.DeepEqual(output, elem.output) { + t.Errorf("Input %q, got %q, expected %q", elem.input, output, elem.output) + } + } +} + +func TestErrorSplit(t *testing.T) { + for _, elem := range errorSplitTest { + _, err := Split(elem.input) + if err != elem.error { + t.Errorf("Input %q, got error %#v, expected error %#v", elem.input, err, elem.error) + } + } +} + +var simpleSplitTest = []struct { + input 
string + output []string +}{ + {"hello", []string{"hello"}}, + {"hello goodbye", []string{"hello", "goodbye"}}, + {"hello goodbye", []string{"hello", "goodbye"}}, + {"glob* test?", []string{"glob*", "test?"}}, + {"don\\'t you know the dewey decimal system\\?", []string{"don't", "you", "know", "the", "dewey", "decimal", "system?"}}, + {"'don'\\''t you know the dewey decimal system?'", []string{"don't you know the dewey decimal system?"}}, + {"one '' two", []string{"one", "", "two"}}, + {"text with\\\na backslash-escaped newline", []string{"text", "witha", "backslash-escaped", "newline"}}, + {"text \"with\na\" quoted newline", []string{"text", "with\na", "quoted", "newline"}}, + {"\"quoted\\d\\\\\\\" text with\\\na backslash-escaped newline\"", []string{"quoted\\d\\\" text witha backslash-escaped newline"}}, + {"text with an escaped \\\n newline in the middle", []string{"text", "with", "an", "escaped", "newline", "in", "the", "middle"}}, + {"foo\"bar\"baz", []string{"foobarbaz"}}, +} + +var errorSplitTest = []struct { + input string + error error +}{ + {"don't worry", UnterminatedSingleQuoteError}, + {"'test'\\''ing", UnterminatedSingleQuoteError}, + {"\"foo'bar", UnterminatedDoubleQuoteError}, + {"foo\\", UnterminatedEscapeError}, + {" \\", UnterminatedEscapeError}, +} diff --git a/agent/pkg/process-manager/start.go b/agent/pkg/process-manager/start.go new file mode 100644 index 0000000..054a6b8 --- /dev/null +++ b/agent/pkg/process-manager/start.go @@ -0,0 +1,205 @@ +package process_manager + +import ( + "context" + "fmt" + _ "net/http/pprof" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mexec" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mlog" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mrunners" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/msetups" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/munit" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/utils" + "github.com/guoyk93/rg" +) + +var ( + GitHash = "UNKNOWN" + UnitDir = "" +) + +const ( + dirNone = "none" +) + +func mkdirUnlessNone(dir string) error { + if dir == dirNone { + return nil + } + return os.MkdirAll(dir, 0755) +} + +func exit(err *error) { + if *err != nil { + _, _ = fmt.Fprintf(os.Stderr, "%s: exited with error: %s\n", "minit", (*err).Error()) + os.Exit(1) + } else { + _, _ = fmt.Fprintf(os.Stdout, "%s: exited\n", "minit") + } +} + +func envStr(key string, out *string) { + if val := strings.TrimSpace(os.Getenv(key)); val != "" { + *out = val + } +} + +func envBool(key string, out *bool) { + if val := strings.TrimSpace(os.Getenv(key)); val != "" { + *out, _ = strconv.ParseBool(val) + } +} + +func Start(optUnitDir string) { + + if optUnitDir == "" { + runDirectory, _ := utils.GetCurrentPath() + optUnitDir = filepath.Join(runDirectory, "/unit.d") + } + + UnitDir = optUnitDir + + var err error + defer exit(&err) + defer rg.Guard(&err) + + var ( + optLogDir = optUnitDir + "/logs" + optQuickExit bool + ) + + //envStr("MINIT_UNIT_DIR", &optUnitDir) + //envStr("MINIT_LOG_DIR", &optLogDir) + //envBool("MINIT_QUICK_EXIT", &optQuickExit) + + rg.Must0(mkdirUnlessNone(optUnitDir)) + rg.Must0(mkdirUnlessNone(optLogDir)) + + createLogger := func(name string, pfx string) (mlog.ProcLogger, error) { + var rfo *mlog.RotatingFileOptions + if optLogDir != dirNone { + rfo = &mlog.RotatingFileOptions{ + Dir: optLogDir, + Filename: name, + 
} + } + return mlog.NewProcLogger(mlog.ProcLoggerOptions{ + ConsolePrefix: pfx, + FileOptions: rfo, + }) + } + + log := rg.Must(createLogger("minit", "minit: ")) + + exem := mexec.NewManager() + + log.Print("starting (#" + GitHash + ")") + + // run through setups + rg.Must0(msetups.Setup(log)) + + // load units + loader := munit.NewLoader() + + units, skips := rg.Must2( + loader.Load( + munit.LoadOptions{ + Dir: optUnitDir, + }, + ), + ) + + for _, skip := range skips { + log.Print("unit skipped: " + skip.Name) + } + + // load runners + var ( + runnersS []mrunners.Runner + runnersL []mrunners.Runner + ) + + { + var runners []mrunners.Runner + + // convert units to runners + for _, unit := range units { + runners = append( + runners, + rg.Must(mrunners.Create(mrunners.RunnerOptions{ + Unit: unit, + Exec: exem, + Logger: rg.Must(createLogger(unit.Name, "")), + })), + ) + } + + // sort runners + sort.Slice(runners, func(i, j int) bool { + return runners[i].Order < runners[j].Order + }) + + // split short runners and long runners + for _, runner := range runners { + if runner.Long { + runnersL = append(runnersL, runner) + } else { + runnersS = append(runnersS, runner) + } + } + } + + // execute short runners + for _, runner := range runnersS { + log.Printf("runnersS:%s", runner.Order) + + runner.Action.Do(context.Background()) + } + + // quick exit + if len(runnersL) == 0 && optQuickExit { + log.Printf("no long runners and MINIT_QUICK_EXIT is set") + return + } + + // run long runners + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + + for _, runner := range runnersL { + log.Printf("runnersL:%s", runner.Order) + + wg.Add(1) + go func(runner mrunners.Runner) { + runner.Action.Do(ctx) + wg.Done() + }(runner) + } + + log.Printf("started") + + // wait for signals + sig := <-utils.StopCh + log.Printf("signal caught: %s,关闭各采集器", sig.String()) + + // shutdown context + cancel() + + // delay 3 seconds + time.Sleep(time.Second * 3) + + // broadcast signals + exem.Signal(sig) + + // wait for long runners + wg.Wait() + log.Printf("各采集器已关闭!") +} diff --git a/agent/pkg/process-manager/testdata/conf/sample.conf b/agent/pkg/process-manager/testdata/conf/sample.conf new file mode 100644 index 0000000..3737650 --- /dev/null +++ b/agent/pkg/process-manager/testdata/conf/sample.conf @@ -0,0 +1 @@ +This is a sample conf {{ uppercase .Env.HOME }} diff --git a/agent/pkg/process-manager/testdata/minit.d/test.yml b/agent/pkg/process-manager/testdata/minit.d/test.yml new file mode 100644 index 0000000..8a73ac2 --- /dev/null +++ b/agent/pkg/process-manager/testdata/minit.d/test.yml @@ -0,0 +1,32 @@ +name: echo +kind: once +command: + - echo + - once + - $HOME +--- +name: shell-test +kind: daemon +count: 3 +shell: /bin/bash +command: + - sleep 1 && echo hello world +--- +name: sleep +kind: daemon +count: 3 +command: + - sleep + - 5 +--- +name: echo-cron +kind: cron +cron: "@every 10s" +command: + - echo + - cron +--- +name: render-test +kind: render +files: + - testdata/conf/*.conf \ No newline at end of file diff --git a/agent/pkg/process-manager/unit.d/log/minit.err.log b/agent/pkg/process-manager/unit.d/log/minit.err.log new file mode 100644 index 0000000..e69de29 diff --git a/agent/pkg/process-manager/unit.d/log/minit.out.log b/agent/pkg/process-manager/unit.d/log/minit.out.log new file mode 100644 index 0000000..eb6ad77 --- /dev/null +++ b/agent/pkg/process-manager/unit.d/log/minit.out.log @@ -0,0 +1,10 @@ +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting 
(#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started diff --git a/agent/pkg/process-manager/unit.d/logs/minit.err.log b/agent/pkg/process-manager/unit.d/logs/minit.err.log new file mode 100644 index 0000000..e69de29 diff --git a/agent/pkg/process-manager/unit.d/logs/minit.out.log b/agent/pkg/process-manager/unit.d/logs/minit.out.log new file mode 100644 index 0000000..7d1a21e --- /dev/null +++ b/agent/pkg/process-manager/unit.d/logs/minit.out.log @@ -0,0 +1,113 @@ +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +signal caught: interrupt,关闭各采集器 +各采集器已关闭! +starting (#UNKNOWN) +started +signal caught: interrupt,关闭各采集器 +各采集器已关闭! +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +signal caught: interrupt,关闭各采集器 +各采集器已关闭! +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +signal caught: interrupt,关闭各采集器 +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started +starting (#UNKNOWN) +started diff --git a/agent/pkg/utils/compression/const.go b/agent/pkg/utils/compression/const.go new file mode 100644 index 0000000..e63b386 --- /dev/null +++ b/agent/pkg/utils/compression/const.go @@ -0,0 +1,6 @@ +package compression + +const ( + ZIP = "zip" + TGZ = "tgz" +) diff --git a/agent/pkg/utils/compression/testdata/agentv1.1.0.tar.gz b/agent/pkg/utils/compression/testdata/agentv1.1.0.tar.gz new file mode 100644 index 0000000..453cca7 Binary files /dev/null and b/agent/pkg/utils/compression/testdata/agentv1.1.0.tar.gz differ diff --git a/agent/pkg/utils/compression/testdata/test.tar.gz b/agent/pkg/utils/compression/testdata/test.tar.gz new file mode 100644 index 0000000..afa4d53 Binary files /dev/null and b/agent/pkg/utils/compression/testdata/test.tar.gz differ diff --git a/agent/pkg/utils/compression/testdata/test.txt b/agent/pkg/utils/compression/testdata/test.txt new file mode 100644 index 0000000..e69de29 diff --git a/agent/pkg/utils/compression/testdata/test.zip b/agent/pkg/utils/compression/testdata/test.zip new file mode 100644 index 0000000..11d0709 Binary files /dev/null and b/agent/pkg/utils/compression/testdata/test.zip differ diff --git a/agent/pkg/utils/compression/tgz.go b/agent/pkg/utils/compression/tgz.go new file mode 100644 index 0000000..15d9ad2 --- /dev/null +++ 
b/agent/pkg/utils/compression/tgz.go @@ -0,0 +1,154 @@ +package compression + +import ( + "archive/tar" + "compress/gzip" + "io" + "os" + "path" + "path/filepath" + "strings" +) + +type TGZHandler struct { +} + +func NewTGZHandler() *TGZHandler { + return &TGZHandler{} +} + +// UNTarGZ 解压tar.gz文件到指定目录,包含顶层文件夹 +func (z *TGZHandler) UNTarGZ(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer func(archive *os.File) { + err := archive.Close() + if err != nil { + return + } + }(archive) + + gr, err := gzip.NewReader(archive) + if err != nil { + return err + } + + defer func(gr *gzip.Reader) { + err := gr.Close() + if err != nil { + return + } + }(gr) + + tr := tar.NewReader(gr) + for { + f, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + filePath := path.Join(dst, f.Name) + if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil { + return err + } + + if f.FileInfo().IsDir() { + err := os.MkdirAll(filePath, os.ModePerm) + if err != nil { + return err + } + continue + } else { + fileName := path.Join(dst, f.Name) + fw, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(f.Mode)) + defer func(fw *os.File) { + err := fw.Close() + if err != nil { + return + } + }(fw) + if err != nil { + return err + } + if _, err := io.Copy(fw, tr); err != nil { + return err + } + } + + } + + return nil +} + +// UNTarGZTo 解压tar.gz文件到指定目录,不包含顶层文件夹,只是将文件解压到指定目录 +func (z *TGZHandler) UNTarGZTo(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer func(archive *os.File) { + err := archive.Close() + if err != nil { + return + } + }(archive) + + gr, err := gzip.NewReader(archive) + if err != nil { + return err + } + + defer func(gr *gzip.Reader) { + err := gr.Close() + if err != nil { + return + } + }(gr) + + tr := tar.NewReader(gr) + for { + f, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + filePath := path.Join(dst, f.Name) + if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil { + return err + } + + if f.FileInfo().IsDir() { + err := os.MkdirAll(filePath, os.ModePerm) + if err != nil { + return err + } + continue + } else { + splitPath := strings.Split(f.Name, string(os.PathSeparator)) + targetPath := strings.Join(splitPath[1:], string(os.PathSeparator)) + filePath := filepath.Join(dst, targetPath) + fw, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(f.Mode)) + defer func(fw *os.File) { + err := fw.Close() + if err != nil { + return + } + }(fw) + if err != nil { + return err + } + if _, err := io.Copy(fw, tr); err != nil { + return err + } + } + + } + + return nil +} diff --git a/agent/pkg/utils/compression/tgz_test.go b/agent/pkg/utils/compression/tgz_test.go new file mode 100644 index 0000000..a11a294 --- /dev/null +++ b/agent/pkg/utils/compression/tgz_test.go @@ -0,0 +1,11 @@ +package compression + +import ( + "testing" +) + +func TestTGZHandler_UNTarGZTo(t *testing.T) { + z := &TGZHandler{} + //z.UNTarGZTo("./testdata/test.tar.gz", "./testdata/v2.0.0") + z.UNTarGZ("./testdata/test.tar.gz", "./testdata/v2.0.0") +} diff --git a/agent/pkg/utils/compression/zip.go b/agent/pkg/utils/compression/zip.go new file mode 100644 index 0000000..52128fd --- /dev/null +++ b/agent/pkg/utils/compression/zip.go @@ -0,0 +1,109 @@ +package compression + +import ( + "archive/zip" + "io" + "os" + "path/filepath" + "strings" +) + +// 参考 
https://www.cnblogs.com/super-codex/articles/17124567.html +type ZipHandler struct { +} + +func NewZipHandler() *ZipHandler { + return &ZipHandler{} +} + +// UnZip +// 解压zip文件到指定目录,包含顶层文件夹 +func (z *ZipHandler) UnZip(src, dst string) error { + archive, err := zip.OpenReader(src) + if err != nil { + return err + } + defer func(archive *zip.ReadCloser) { + err := archive.Close() + if err != nil { + return + } + }(archive) + + for _, item := range archive.File { + filePath := filepath.Join(dst, item.Name) + + if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil { + return err + } + + if item.FileInfo().IsDir() { + err := os.MkdirAll(filePath, os.ModePerm) + if err != nil { + return err + } + continue + } + + file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, item.Mode()) + if err != nil { + return err + } + + fileInArchive, err := item.Open() + if err != nil { + return err + } + + if _, err := io.Copy(file, fileInArchive); err != nil { + return err + } + } + return nil +} + +// UnZipTo +// 解压zip文件到指定目录,不包含顶层文件夹,只是将文件解压到指定目录 +func (z *ZipHandler) UnZipTo(src, dst string) error { + archive, err := zip.OpenReader(src) + if err != nil { + return err + } + defer func(archive *zip.ReadCloser) { + err := archive.Close() + if err != nil { + return + } + }(archive) + + for _, item := range archive.File { + splitPath := strings.Split(item.Name, string(os.PathSeparator)) + targetPath := strings.Join(splitPath[1:], string(os.PathSeparator)) + filePath := filepath.Join(dst, targetPath) + + if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil { + return err + } + + if item.FileInfo().IsDir() { + err := os.MkdirAll(filePath, os.ModePerm) + if err != nil { + return err + } + continue + } + + file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, item.Mode()) + if err != nil { + return err + } + fileInArchive, err := item.Open() + if err != nil { + return err + } + if _, err := io.Copy(file, fileInArchive); err != nil { + return err + } + } + return nil +} diff --git a/agent/pkg/utils/compression/zip_test.go b/agent/pkg/utils/compression/zip_test.go new file mode 100644 index 0000000..31037f9 --- /dev/null +++ b/agent/pkg/utils/compression/zip_test.go @@ -0,0 +1,8 @@ +package compression + +import "testing" + +func TestZipHandler_UnZipTo(t *testing.T) { + z := &ZipHandler{} + z.UnZip("./testdata/test.zip", "./testdata/v1.0.0") +} diff --git a/agent/pkg/utils/httputils/http.go b/agent/pkg/utils/httputils/http.go new file mode 100644 index 0000000..658ce56 --- /dev/null +++ b/agent/pkg/utils/httputils/http.go @@ -0,0 +1,183 @@ +package httputils + +import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" + "fmt" + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/config" + "io" + "net" + "net/http" + "net/url" + "os" + "time" +) + +const DefaultTimeout = 10 * time.Second + +func HttpGet(url string, auth *config.Auth, timeout time.Duration, customHeaders map[string]string, cookies []*http.Cookie) ([]byte, int, []*http.Cookie, error) { + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, 0, nil, err + } + + for _, c := range cookies { + req.AddCookie(c) + } + + transport, err := CreateTransport(auth, &http.Transport{}, timeout, customHeaders) + if err != nil { + return nil, 0, nil, err + } + + client := http.Client{Transport: transport, Timeout: timeout} + + resp, err := client.Do(req) + if err != nil { + return nil, 0, nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + return 
body, resp.StatusCode, resp.Cookies(), err +} + +// HttpPost sends an HTTP Post request to the given URL and returns the response body. +func HttpPost(url string, auth *config.Auth, body io.Reader, timeout time.Duration, customHeaders map[string]string) ([]byte, int, []*http.Cookie, error) { + req, err := http.NewRequest(http.MethodPost, url, body) + if err != nil { + return nil, 0, nil, err + } + + transport, err := CreateTransport(auth, &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}, timeout, customHeaders) + if err != nil { + return nil, 0, nil, err + } + + client := http.Client{Transport: transport, Timeout: timeout} + + resp, err := client.Do(req) + if err != nil { + return nil, 0, nil, err + } + defer resp.Body.Close() + respBody, err := io.ReadAll(resp.Body) + return respBody, resp.StatusCode, resp.Cookies(), err +} + +// Url拼接的部分Query条件 +func ConvertToQueryParams(requrl string, params map[string]string) string { + data := url.Values{} + for key, value := range params { + data.Set(key, value) + } + u, _ := url.ParseRequestURI(requrl) + u.RawQuery = data.Encode() + return u.String() +} + +type authRoundTripper struct { + auth string + originalRT http.RoundTripper +} + +type customHeadersRoundTripper struct { + headers map[string]string + originalRT http.RoundTripper +} + +func (rt *authRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req.Header.Set("Authorization", rt.auth) + return rt.originalRT.RoundTrip(req) +} + +func (rt *customHeadersRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + // note: no need to check for nil or empty map - newCustomHeadersRoundTripper will assure us there will always be at least 1 + for k, v := range rt.headers { + req.Header.Set(k, v) + } + return rt.originalRT.RoundTrip(req) +} + +func newAuthRoundTripper(auth *config.Auth, rt http.RoundTripper) http.RoundTripper { + switch auth.Type { + case config.AuthTypeBearer: + token := auth.Token + return &authRoundTripper{auth: "Bearer " + token, originalRT: rt} + case config.AuthTypeBasic: + encoded := base64.StdEncoding.EncodeToString([]byte(auth.Username + ":" + auth.Password)) + return &authRoundTripper{auth: "Basic " + encoded, originalRT: rt} + default: + return rt + } +} + +func newCustomHeadersRoundTripper(headers map[string]string, rt http.RoundTripper) http.RoundTripper { + if len(headers) == 0 { + // if there are no custom headers then there is no need for a special RoundTripper; therefore just return the original RoundTripper + return rt + } + return &customHeadersRoundTripper{ + headers: headers, + originalRT: rt, + } +} + +// Creates a new HTTP Transport with TLS, Timeouts, and optional custom headers. +// +// Please remember that setting long timeouts is not recommended as it can make +// idle connections stay open for as long as 2 * timeout. This should only be +// done in cases where you know the request is very likely going to be reused at +// some point in the near future. +func CreateTransport(auth *config.Auth, transportConfig *http.Transport, timeout time.Duration, customHeaders map[string]string) (http.RoundTripper, error) { + // Limits the time spent establishing a TCP connection if a new one is + // needed. If DialContext is not set, Dial is used, we only create a new one + // if neither is defined. 
+ if transportConfig.DialContext == nil { + transportConfig.DialContext = (&net.Dialer{ + Timeout: timeout, + }).DialContext + } + + transportConfig.IdleConnTimeout = timeout + + // We might need some custom RoundTrippers to manipulate the requests (for auth and other custom request headers). + // Chain together the RoundTrippers that we need, retaining the outer-most round tripper so we can return it. + outerRoundTripper := newCustomHeadersRoundTripper(customHeaders, transportConfig) + + if auth != nil { + tlscfg, err := GetTLSConfig(auth) + if err != nil { + return nil, err + } + if tlscfg != nil { + transportConfig.TLSClientConfig = tlscfg + } + outerRoundTripper = newAuthRoundTripper(auth, outerRoundTripper) + } + + return outerRoundTripper, nil +} + +func GetTLSConfig(auth *config.Auth) (*tls.Config, error) { + if auth.InsecureSkipVerify || auth.CAFile != "" { + var certPool *x509.CertPool + if auth.CAFile != "" { + certPool = x509.NewCertPool() + cert, err := os.ReadFile(auth.CAFile) + + if err != nil { + return nil, fmt.Errorf("failed to get root CA certificates: %s", err) + } + + if ok := certPool.AppendCertsFromPEM(cert); !ok { + return nil, fmt.Errorf("supplied CA file could not be parsed") + } + } + return &tls.Config{ + InsecureSkipVerify: auth.InsecureSkipVerify, + RootCAs: certPool, + }, nil + } + return nil, nil +} diff --git a/agent/pkg/utils/ping/ping.go b/agent/pkg/utils/ping/ping.go new file mode 100644 index 0000000..91d34f2 --- /dev/null +++ b/agent/pkg/utils/ping/ping.go @@ -0,0 +1,24 @@ +package ping + +import ( + "git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/log" + "github.com/prometheus-community/pro-bing" + "time" +) + +func Ping(addr string) (time.Duration, error) { + pinger, err := probing.NewPinger(addr) + if err != nil { + log.Errorf("构件pinger失败,%s", err) + return 0, err + } + pinger.Count = 3 + err = pinger.Run() // Blocks until finished. 
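+	// Note: pro-bing sends unprivileged UDP pings by default; if Run fails with a permission error, the host needs net.ipv4.ping_group_range configured, or call pinger.SetPrivileged(true) (requires root or CAP_NET_RAW) before Run.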
+ if err != nil { + log.Errorf("ping失败,%s", err) + return 0, err + } + stats := pinger.Statistics() // get send/receive/duplicate/rtt stats + + return stats.AvgRtt, nil +} diff --git a/agent/pkg/utils/randomutils/random.go b/agent/pkg/utils/randomutils/random.go new file mode 100644 index 0000000..faecd24 --- /dev/null +++ b/agent/pkg/utils/randomutils/random.go @@ -0,0 +1,12 @@ +package randomutils + +import ( + "math/rand" + "time" +) + +func GetRandomNumber(min, max int32) (num int32) { + rand.Seed(time.Now().UnixNano()) + num = rand.Int31n(max-min-1) + min + 1 + return +} diff --git a/agent/pkg/utils/stop_channel.go b/agent/pkg/utils/stop_channel.go new file mode 100644 index 0000000..df22b8a --- /dev/null +++ b/agent/pkg/utils/stop_channel.go @@ -0,0 +1,9 @@ +package utils + +import "os" + +var StopCh = make(chan os.Signal, 1) + +var ChUserSig = make(chan os.Signal, 1) + +var StopNats = make(chan os.Signal, 1) diff --git a/agent/pkg/utils/utils.go b/agent/pkg/utils/utils.go new file mode 100644 index 0000000..c5f6d9e --- /dev/null +++ b/agent/pkg/utils/utils.go @@ -0,0 +1,90 @@ +package utils + +import ( + "errors" + "os" + "path" + "path/filepath" + "runtime" + "strings" +) + +// If 模拟简单的三元操作 +func If(condition bool, trueVal, falseVal any) any { + if condition { + return trueVal + } + return falseVal +} + +// GetRunPath 获取执行目录作为默认目录 +func GetRunPath() string { + currentPath, err := os.Getwd() + if err != nil { + return "" + } + return currentPath +} + +// GetFileDirectoryToCaller 根据运行堆栈信息获取文件目录,skip 默认1 +func GetFileDirectoryToCaller(opts ...int) (directory string, ok bool) { + var filename string + directory = "" + skip := 1 + if opts != nil { + skip = opts[0] + } + if _, filename, _, ok = runtime.Caller(skip); ok { + directory = path.Dir(filename) + } + return +} + +// GetCurrentAbPathByExecutable 获取当前执行文件绝对路径 +func GetCurrentAbPathByExecutable() (string, error) { + exePath, err := os.Executable() + if err != nil { + return "", err + } + res, _ := filepath.EvalSymlinks(exePath) + return filepath.Dir(res), nil +} + +// GetCurrentPath 获取当前执行文件路径,如果是临时目录则获取当前文件的的执行路径 +func GetCurrentPath() (dir string, err error) { + dir, err = GetCurrentAbPathByExecutable() + if err != nil { + return "", err + } + + tmpDir, err := filepath.EvalSymlinks(os.TempDir()) + if err != nil { + return "", err + } + + if strings.Contains(dir, tmpDir) { + var ok bool + if dir, ok = GetFileDirectoryToCaller(2); !ok { + return "", errors.New("failed to get path") + } + } + return dir, nil +} + +// GetDefaultPath 获取当前执行文件路径,如果是临时目录则获取运行命令的工作目录 +func GetDefaultPath() (dir string, err error) { + dir, err = GetCurrentAbPathByExecutable() + if err != nil { + return "", err + } + + tmpDir, err := filepath.EvalSymlinks(os.TempDir()) + if err != nil { + return "", err + } + + if strings.Contains(dir, tmpDir) { + return GetRunPath(), nil + } + return dir, nil +} diff --git a/agent/pkg/utils/utils_test.go b/agent/pkg/utils/utils_test.go new file mode 100644 index 0000000..02fb13a --- /dev/null +++ b/agent/pkg/utils/utils_test.go @@ -0,0 +1,48 @@ +package utils + +import ( + "testing" +) + +func TestGetRunPath(t *testing.T) { + path := GetRunPath() + if path == "" { + t.Error("获取运行路径失败") + } +} + +func TestGetCurrentPath(t *testing.T) { + _, err := GetCurrentPath() + if err != nil { + t.Error("获取运行路径失败") + } +} + +func TestGetCurrentAbPathByExecutable(t *testing.T) { + _, err := GetCurrentAbPathByExecutable() + if err != nil { + t.Error("获取路径失败") + } +} + +func TestGetCurrentFileDirectory(t *testing.T) { + path, ok := 
GetFileDirectoryToCaller() + if !ok { + t.Error("获取路径失败", path) + } + + path, ok = GetFileDirectoryToCaller(1) + if !ok { + t.Error("获取路径失败", path) + } +} + +func TestIf(t *testing.T) { + if 3 != If(false, 1, 3) { + t.Error("模拟三元操作失败") + } + + if 1 != If(true, 1, 3) { + t.Error("模拟三元操作失败") + } +}
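A minimal usage sketch for the mtmpl template helpers added in this change; the template string, data values, and printed output below are illustrative only.

package main

import (
	"fmt"

	"git.inspur.com/sbg-jszt/cfn/cfn-schedule-agent/pkg/process-manager/mtmpl"
)

func main() {
	// Render a small template using the predefined Funcs (strings*, strconv*, add, osHostname, ...).
	out, err := mtmpl.Execute(`listen {{stringsToUpper .Addr}}:{{add .Port 1}}`, map[string]any{
		"Addr": "localhost",
		"Port": 8080,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // prints: listen LOCALHOST:8081
}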