iagent troubleshooting
HealthBot uses iagent (ingest agent) containers.
iagent containers use PyEZ "Tables and Views" to collect data from Junos devices.
PyEZ relies on ncclient (a NETCONF client implementation in Python).
iagent data collection is scheduled by SaltStack.
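These layers can be exercised by hand, which helps when isolating a collection problem. Below is a minimal sketch (not part of this repo) that uses PyEZ to open a NETCONF session and invoke the same RPC that the bgp_sessions_state.yml table relies on; the host and credentials are the lab values from the pillar file shown further down this page.

from jnpr.junos import Device
from lxml import etree

# PyEZ opens a NETCONF session on port 830 (using ncclient underneath).
# Host/credentials are lab assumptions taken from /srv/pillar/Group1_vMX1.sls.
with Device(host="100.123.1.0", user="jcluser", passwd="Juniper!1", port=830) as dev:
    # Same Junos RPC as the table definition: get-bgp-neighbor-information
    reply = dev.rpc.get_bgp_neighbor_information()
    print(etree.tostring(reply, pretty_print=True).decode())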
Example:
- Edit the file python_input.yml to set the HealthBot IP address and credentials:
$ vi python_input.yml
- Run this command to configure HealthBot with the details provided in python_input.yml:
$ python ./configure_healthbot.py
HealthBot is now configured with:
- The devices vMX1 to vMX7
- The rule check-bgp-state-using-netconf, which uses an iagent sensor with the table and view bgp_sessions_state.yml
- The playbook bgp-using-netconf, which uses the rule check-bgp-state-using-netconf
- The device group Group1, which includes the devices vMX1 to vMX7 and an instance of the playbook bgp-using-netconf
Run this command on a Junos device to show NETCONF (TCP port 830) connection activity:
jcluser@vMX3> show system connections | grep 830
tcp4 0 0 100.123.1.2.830 100.123.35.1.33130 ESTABLISHED
tcp4 0 0 *.830 *.* LISTEN
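The ESTABLISHED entry above is the NETCONF-over-SSH session that ncclient maintains from the HealthBot server to the device. If in doubt, the same kind of session can be opened manually with ncclient; a minimal sketch, assuming the vMX3 management address from the output above and the lab credentials shown later on this page:

from ncclient import manager

# Open a NETCONF session on port 830, as the iagent proxies do.
with manager.connect(
    host="100.123.1.2",          # assumption: vMX3 management address from the output above
    port=830,
    username="jcluser",
    password="Juniper!1",
    hostkey_verify=False,
    device_params={"name": "junos"},
) as m:
    print(m.connected)           # True while the NETCONF session is up
    print(list(m.server_capabilities)[:5])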
The following steps can be useful for troubleshooting purposes.
SSH to the HealthBot server and run this command to list the iagent containers:
$ docker ps | grep iagent
Start a shell session in an iagent container:
$ docker exec -it <container-id> bash
Example with container ID 8eb6358d7a9a:
$ docker exec -it 8eb6358d7a9a bash
root@8eb6358d7a9a:/# ls jfit/salt/_tables/
bgp_sessions_state.yml
root@8eb6358d7a9a:/# more jfit/salt/_tables/bgp_sessions_state.yml
---
bgpTable:
  rpc: get-bgp-neighbor-information
  item: bgp-peer
  view: bgpView
  key: peer-address

bgpView:
  fields:
    peer_as: peer-as
    flap_count: flap-count
    peer_state: peer-state
    peer_address: peer-address
    peer_type: peer-type
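This is a standard PyEZ table/view definition. It can be validated outside of SaltStack by loading it directly with PyEZ; a minimal sketch, assuming the file is in the current directory and reusing the lab credentials from the pillar file shown below:

from jnpr.junos import Device
from jnpr.junos.factory import loadyaml

# loadyaml() returns the table/view classes defined in the YAML file,
# e.g. {'bgpTable': ..., 'bgpView': ...}.
defs = loadyaml("bgp_sessions_state.yml")

with Device(host="100.123.1.0", user="jcluser", passwd="Juniper!1", port=830) as dev:
    bgp = defs["bgpTable"](dev)
    bgp.get()                    # runs the get-bgp-neighbor-information RPC
    for peer in bgp:
        print(peer.peer_address, peer.peer_state, peer.flap_count)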
root@8eb6358d7a9a:/# more /srv/pillar/top.sls
base:
  Group1_vMX1:
    - Group1_vMX1
  Group1_vMX2:
    - Group1_vMX2
  Group1_vMX3:
    - Group1_vMX3
  Group1_vMX4:
    - Group1_vMX4
  Group1_vMX5:
    - Group1_vMX5
  Group1_vMX6:
    - Group1_vMX6
  Group1_vMX7:
    - Group1_vMX7
root@8eb6358d7a9a:/# ls /srv/pillar/
Group1_vMX1.sls Group1_vMX2.sls Group1_vMX3.sls Group1_vMX4.sls Group1_vMX5.sls Group1_vMX6.sls Group1_vMX7.sls top.sls
root@8eb6358d7a9a:/# more /srv/pillar/Group1_vMX1.sls
proxy:
  proxytype: junos
  host: 100.123.1.0
  username: jcluser
  passwd: Juniper!1
  timeout: 300
  port: 830
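To confirm that each proxy minion actually picked up its pillar data, Salt's Python API can be used from inside the container; a minimal sketch, assuming the container's default master configuration (the CLI equivalent is a pillar.items call against one minion):

import salt.client

# Query the pillar of one proxy minion; equivalent to targeting Group1_vMX1
# with the pillar.items execution module.
local = salt.client.LocalClient()
print(local.cmd("Group1_vMX1", "pillar.items"))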
root@8eb6358d7a9a:/# more jfit/salt/top.sls
base:
  Group1_vMX1:
    - Group1_vMX1*
  Group1_vMX2:
    - Group1_vMX2*
  Group1_vMX3:
    - Group1_vMX3*
  Group1_vMX4:
    - Group1_vMX4*
  Group1_vMX5:
    - Group1_vMX5*
  Group1_vMX6:
    - Group1_vMX6*
  Group1_vMX7:
    - Group1_vMX7*
root@8eb6358d7a9a:/# more jfit/salt/Group1_vMX1__scheduler__bgp_sessions_state_bgpTable.sls
bgp_sessions_state.yml bgpTable:
  schedule.present:
    - function: iagent.run
    - job_args:
      - bgpTable
      - bgp_sessions_state.yml
      - /jfit/salt/_tables
    - seconds: 15
    - splay: 2
    - run_on_start: True
    - returner: iagent_influxdb
    - return_config: Group1_vMX1
root@8eb6358d7a9a:/# ls jfit/salt/
Group1_vMX1__scheduler__bgp_sessions_state_bgpTable.sls Group1_vMX4__scheduler__bgp_sessions_state_bgpTable.sls Group1_vMX7__scheduler__bgp_sessions_state_bgpTable.sls top.sls
Group1_vMX2__scheduler__bgp_sessions_state_bgpTable.sls Group1_vMX5__scheduler__bgp_sessions_state_bgpTable.sls
Group1_vMX3__scheduler__bgp_sessions_state_bgpTable.sls Group1_vMX6__scheduler__bgp_sessions_state_bgpTable.sls
Run this command to list the jobs currently scheduled
root@8eb6358d7a9a:/# salt '*' schedule.list
Group1_vMX7:
schedule:
bgp_sessions_state.yml bgpTable:
args:
- bgpTable
- bgp_sessions_state.yml
- /jfit/salt/_tables
enabled: true
function: iagent.run
jid_include: true
maxrunning: 1
name: bgp_sessions_state.yml bgpTable
return_config: Group1_vMX7
returner: iagent_influxdb
seconds: 15
splay: 2
enabled: true
Group1_vMX1:
schedule:
bgp_sessions_state.yml bgpTable:
args:
- bgpTable
- bgp_sessions_state.yml
- /jfit/salt/_tables
enabled: true
function: iagent.run
jid_include: true
maxrunning: 1
name: bgp_sessions_state.yml bgpTable
return_config: Group1_vMX1
returner: iagent_influxdb
seconds: 15
splay: 2
enabled: true
Group1_vMX2:
schedule:
bgp_sessions_state.yml bgpTable:
args:
- bgpTable
- bgp_sessions_state.yml
- /jfit/salt/_tables
enabled: true
function: iagent.run
jid_include: true
maxrunning: 1
name: bgp_sessions_state.yml bgpTable
return_config: Group1_vMX2
returner: iagent_influxdb
seconds: 15
splay: 2
enabled: true
Group1_vMX4:
schedule:
bgp_sessions_state.yml bgpTable:
args:
- bgpTable
- bgp_sessions_state.yml
- /jfit/salt/_tables
enabled: true
function: iagent.run
jid_include: true
maxrunning: 1
name: bgp_sessions_state.yml bgpTable
return_config: Group1_vMX4
returner: iagent_influxdb
seconds: 15
splay: 2
enabled: true
Group1_vMX5:
schedule:
bgp_sessions_state.yml bgpTable:
args:
- bgpTable
- bgp_sessions_state.yml
- /jfit/salt/_tables
enabled: true
function: iagent.run
jid_include: true
maxrunning: 1
name: bgp_sessions_state.yml bgpTable
return_config: Group1_vMX5
returner: iagent_influxdb
seconds: 15
splay: 2
enabled: true
Group1_vMX6:
schedule:
bgp_sessions_state.yml bgpTable:
args:
- bgpTable
- bgp_sessions_state.yml
- /jfit/salt/_tables
enabled: true
function: iagent.run
jid_include: true
maxrunning: 1
name: bgp_sessions_state.yml bgpTable
return_config: Group1_vMX6
returner: iagent_influxdb
seconds: 15
splay: 2
enabled: true
Group1_vMX3:
schedule:
bgp_sessions_state.yml bgpTable:
args:
- bgpTable
- bgp_sessions_state.yml
- /jfit/salt/_tables
enabled: true
function: iagent.run
jid_include: true
maxrunning: 1
name: bgp_sessions_state.yml bgpTable
return_config: Group1_vMX3
returner: iagent_influxdb
seconds: 15
splay: 2
enabled: true
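To trigger one of these scheduled collections immediately instead of waiting for the 15-second interval, the schedule.run_job execution module can be called through Salt's Python API; a minimal sketch, run as root inside the container, using the job name listed above:

import salt.client

# Fire the scheduled job "bgp_sessions_state.yml bgpTable" on one proxy minion now.
local = salt.client.LocalClient()
print(local.cmd("Group1_vMX1", "schedule.run_job", ["bgp_sessions_state.yml bgpTable"]))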
Run this command to list the proxy minion keys known to the Salt master:
root@8eb6358d7a9a:/# salt-key -L
Accepted Keys:
Group1_vMX1
Group1_vMX2
Group1_vMX3
Group1_vMX4
Group1_vMX5
Group1_vMX6
Group1_vMX7
Denied Keys:
Unaccepted Keys:
Rejected Keys:
Run this command to see the Salt-proxy processes
root@8eb6358d7a9a:/# ps -ef | grep salt-proxy
root 34 1 1 13:58 ? 00:00:11 /usr/bin/python /usr/local/bin/salt-proxy --proxyid=Group1_vMX1 -d
root 65 1 1 13:58 ? 00:00:10 /usr/bin/python /usr/local/bin/salt-proxy --proxyid=Group1_vMX2 -d
root 75 1 1 13:58 ? 00:00:09 /usr/bin/python /usr/local/bin/salt-proxy --proxyid=Group1_vMX3 -d
root 90 1 1 13:58 ? 00:00:09 /usr/bin/python /usr/local/bin/salt-proxy --proxyid=Group1_vMX4 -d
root 112 1 1 13:58 ? 00:00:08 /usr/bin/python /usr/local/bin/salt-proxy --proxyid=Group1_vMX5 -d
root 125 1 1 13:58 ? 00:00:08 /usr/bin/python /usr/local/bin/salt-proxy --proxyid=Group1_vMX6 -d
root 139 1 1 13:58 ? 00:00:08 /usr/bin/python /usr/local/bin/salt-proxy --proxyid=Group1_vMX7 -d
Run this command to make sure the proxies are up and responding to the master. This is not an ICMP ping.
root@8eb6358d7a9a:/# salt '*' test.ping
Group1_vMX7:
True
Group1_vMX6:
True
Group1_vMX5:
True
Group1_vMX2:
True
Group1_vMX1:
True
Group1_vMX4:
True
Group1_vMX3:
True
Run this command to make sure the proxies can communicate with Junos devices
root@8eb6358d7a9a:/# salt '*' junos.cli "show chassis hardware"
Group1_vMX2:
----------
message:
Hardware inventory:
Item Version Part number Serial number Description
Chassis VM5B6A238173 VMX
Midplane
Routing Engine 0 RE-VMX
CB 0 VMX SCB
FPC 0 Virtual FPC
CPU Rev. 1.0 RIOT-LITE BUILTIN
MIC 0 Virtual
PIC 0 BUILTIN BUILTIN Virtual
out:
True
Group1_vMX3:
----------
message:
Hardware inventory:
Item Version Part number Serial number Description
Chassis VM5BBCC0B520 VMX
Midplane
Routing Engine 0 RE-VMX
CB 0 VMX SCB
FPC 0 Virtual FPC
CPU Rev. 1.0 RIOT-LITE BUILTIN
MIC 0 Virtual
PIC 0 BUILTIN BUILTIN Virtual
out:
True
Group1_vMX4:
----------
message:
Hardware inventory:
Item Version Part number Serial number Description
Chassis VM5B6A238173 VMX
Midplane
Routing Engine 0 RE-VMX
CB 0 VMX SCB
FPC 0 Virtual FPC
CPU Rev. 1.0 RIOT-LITE BUILTIN
MIC 0 Virtual
PIC 0 BUILTIN BUILTIN Virtual
out:
True
Group1_vMX5:
----------
message:
Hardware inventory:
Item Version Part number Serial number Description
Chassis VM5AE25B176A VMX
Midplane
Routing Engine 0 RE-VMX
CB 0 VMX SCB
FPC 0 Virtual FPC
CPU Rev. 1.0 RIOT-LITE BUILTIN
MIC 0 Virtual
PIC 0 BUILTIN BUILTIN Virtual
out:
True
Group1_vMX6:
----------
message:
Hardware inventory:
Item Version Part number Serial number Description
Chassis VM5BBCC0B520 VMX
Midplane
Routing Engine 0 RE-VMX
CB 0 VMX SCB
FPC 0 Virtual FPC
CPU Rev. 1.0 RIOT-LITE BUILTIN
MIC 0 Virtual
PIC 0 BUILTIN BUILTIN Virtual
out:
True
Group1_vMX1:
----------
message:
Hardware inventory:
Item Version Part number Serial number Description
Chassis VM5B6A238173 VMX
Midplane
Routing Engine 0 RE-VMX
CB 0 VMX SCB
FPC 0 Virtual FPC
CPU Rev. 1.0 RIOT-LITE BUILTIN
MIC 0 Virtual
PIC 0 BUILTIN BUILTIN Virtual
out:
True
Group1_vMX7:
----------
message:
Hardware inventory:
Item Version Part number Serial number Description
Chassis VM5BBCC0B520 VMX
Midplane
Routing Engine 0 RE-VMX
CB 0 VMX SCB
FPC 0 Virtual FPC
CPU
out:
True
Salt provides a runner that displays events in real time as they are received on (or sent from) the Salt master.
Run this command to watch the event bus; you should see the data collected from the Junos devices by SaltStack (a Python equivalent is sketched at the end of this page).
root@8eb6358d7a9a:/# salt-run state.event pretty=True
salt/job/20181122142445854179/ret/Group1_vMX1 {
"_stamp": "2018-11-22T14:24:45.854809",
"arg": [
"bgpTable",
"bgp_sessions_state.yml",
"/jfit/salt/_tables"
],
"cmd": "_return",
"fun": "iagent.run",
"fun_args": [
"bgpTable",
"bgp_sessions_state.yml",
"/jfit/salt/_tables"
],
"id": "Group1_vMX1",
"jid": "20181122142445854179",
"pid": 34,
"ret_config": "Group1_vMX1",
"return": {
"changes": {
"hostname": "100.123.1.0",
"out": true,
"reply": {
"11.0.0.102+51849": {
"flap_count": "5",
"peer_address": "11.0.0.102+51849",
"peer_as": "11",
"peer_state": "Established",
"peer_type": "Internal"
},
"11.0.0.103+55504": {
"flap_count": "5",
"peer_address": "11.0.0.103+55504",
"peer_as": "11",
"peer_state": "Established",
"peer_type": "Internal"
},
"11.0.0.104+53942": {
"flap_count": "6",
"peer_address": "11.0.0.104+53942",
"peer_as": "11",
"peer_state": "Established",
"peer_type": "Internal"
}
},
"table": {
"bgpTable": {
"item": "bgp-peer",
"key": "peer-address",
"rpc": "get-bgp-neighbor-information",
"view": "bgpView"
},
"bgpView": {
"fields": {
"flap_count": "flap-count",
"peer_address": "peer-address",
"peer_as": "peer-as",
"peer_state": "peer-state",
"peer_type": "peer-type"
}
}
},
"tablename": "bgpTable"
},
"comment": "",
"name": "'iagent.run'",
"result": true
},
"schedule": "bgp_sessions_state.yml bgpTable",
"success": true,
"tgt": "Group1_vMX1",
"tgt_type": "glob"
}
salt/job/20181122142446186042/ret/Group1_vMX2 {
"_stamp": "2018-11-22T14:24:46.186711",
"arg": [
"bgpTable",
"bgp_sessions_state.yml",
"/jfit/salt/_tables"
],
"cmd": "_return",
"fun": "iagent.run",
"fun_args": [
"bgpTable",
"bgp_sessions_state.yml",
"/jfit/salt/_tables"
],
"id": "Group1_vMX2",
"jid": "20181122142446186042",
"pid": 65,
"ret_config": "Group1_vMX2",
"return": {
"changes": {
"hostname": "100.123.1.1",
"out": true,
"reply": {
"11.0.0.101+179": {
"flap_count": "0",
"peer_address": "11.0.0.101+179",
"peer_as": "11",
"peer_state": "Established",
"peer_type": "Internal"
},
"11.0.0.103+55811": {
"flap_count": "17",
"peer_address": "11.0.0.103+55811",
"peer_as": "11",
"peer_state": "Established",
"peer_type": "Internal"
},
"11.0.0.104+179": {
"flap_count": "5",
"peer_address": "11.0.0.104+179",
"peer_as": "11",
"peer_state": "Established",
"peer_type": "Internal"
}
},
"table": {
"bgpTable": {
"item": "bgp-peer",
"key": "peer-address",
"rpc": "get-bgp-neighbor-information",
"view": "bgpView"
},
"bgpView": {
"fields": {
"flap_count": "flap-count",
"peer_address": "peer-address",
"peer_as": "peer-as",
"peer_state": "peer-state",
"peer_type": "peer-type"
}
}
},
"tablename": "bgpTable"
},
"comment": "",
"name": "'iagent.run'",
"result": true
},
"schedule": "bgp_sessions_state.yml bgpTable",
"success": true,
"tgt": "Group1_vMX2",
"tgt_type": "glob"
}
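The same event bus can also be consumed programmatically. Below is a minimal sketch following the standard Salt event-system pattern, run inside the iagent container and assuming the default master configuration path:

import salt.config
import salt.utils.event

# Load the master configuration (assumption: default path inside the container).
opts = salt.config.client_config("/etc/salt/master")

# Attach to the master event bus, like "salt-run state.event" does.
event = salt.utils.event.get_event(
    "master",
    sock_dir=opts["sock_dir"],
    transport=opts["transport"],
    opts=opts,
)

# Print the tag and function of every event, e.g. iagent.run job returns.
for data in event.iter_events(full=True):
    print(data["tag"], data["data"].get("fun"))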