# Edit the configuration file: prometheus.yml
# my global config
global:
  scrape_interval: 25s # Set the scrape interval to every 25 seconds. Default is every 1 minute.
  evaluation_interval: 25s # Evaluate rules every 25 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ["hadoopm01:9090"]

  # Node-exporter targets, one job per host (port 9100 is the node_exporter default).
  # NOTE(review): the job names below do not match the hostnames they scrape
  # (e.g. job "node_export_hadoopm01" scrapes hadoops01:9100, and jobs
  # "node_export_hadoops01".."05" scrape hadoops03..07). Targets were kept
  # exactly as in the original — confirm whether the job names or the target
  # hostnames are the intended mapping.
  - job_name: "node_export_hadoopm01"
    static_configs:
      - targets: ["hadoops01:9100"]

  - job_name: "node_export_hadoopm02"
    static_configs:
      - targets: ["hadoops02:9100"]

  - job_name: "node_export_hadoops01"
    static_configs:
      - targets: ["hadoops03:9100"]

  - job_name: "node_export_hadoops02"
    static_configs:
      - targets: ["hadoops04:9100"]

  - job_name: "node_export_hadoops03"
    static_configs:
      - targets: ["hadoops05:9100"]

  - job_name: "node_export_hadoops04"
    static_configs:
      - targets: ["hadoops06:9100"]

  - job_name: "node_export_hadoops05"
    static_configs:
      - targets: ["hadoops07:9100"]