Compare commits
150 commits
Commit range: 4242832396 through 28955612be (per-commit author, date, and message columns were empty in the captured compare view).
44 changed files with 4356 additions and 1261 deletions
.envrc (new file, 1 addition)
@@ -0,0 +1 @@
use nix
.gitignore (vendored, 3 additions)
@@ -181,3 +181,6 @@ dmypy.json
# End of https://www.gitignore.io/api/emacs,python

netbox-docker
/.vscode
.direnv
.pre-commit-config.yaml
MANIFEST.in (new file, 1 addition)
@@ -0,0 +1 @@
include requirements.txt
README.md (54 changes)
@@ -13,14 +13,15 @@ The goal is to generate an existing infrastructure on Netbox and have the abilit
* Generic ability to guess datacenters and rack location through drivers (`cmd` and `file` and custom ones)
* Update existing `Device` and `Interface`
* Handle blade moving (new slot, new chassis)
* Handle blade GPU expansions
* Automatic cabling (server's interface to switch's interface) using lldp
* Local inventory using `Inventory Item` for CPU, RAM, RAID cards, physical disks (behind raid cards)
* Local inventory using `Inventory Item` for CPU, GPU, RAM, RAID cards, physical disks (behind raid cards)
* PSUs creation and power consumption reporting (based on vendor's tools)

# Requirements

- Netbox >= 2.6
- Python >= 3.4
- Netbox >= 3.7
- Python >= 3.7
- [pynetbox](https://github.com/digitalocean/pynetbox/)
- [python3-netaddr](https://github.com/drkjam/netaddr)
- [python3-netifaces](https://github.com/al45tair/netifaces)

@@ -50,7 +51,7 @@ The agent can be run from a shell and get its configuration from either the conf
Configuration values are overridden based on the following precedence: command line arguments (might include config file) > environment variables > default config file > defaults.

```
# netbox_agent -c /etc/netbox_agent.yml --register
# netbox_agent -c /etc/netbox_agent.yaml --register
INFO:root:Creating chassis blade (serial: QTFCQ574502EF)
INFO:root:Creating blade (serial: QTFCQ574502D2) myserver on chassis QTFCQ574502EF
INFO:root:Setting device (QTFCQ574502D2) new slot on Slot 9 (Chassis QTFCQ574502EF)..

@@ -91,10 +92,12 @@ netbox:
token: supersecrettoken
# uncomment to disable ssl verification
# ssl_verify: false
# uncomment to use the system's CA certificates
# ssl_ca_certs_file: /etc/ssl/certs/ca-certificates.crt

# Network configuration
network:
# Regex to ignore interfaces
# Regex to ignore interfaces
ignore_interfaces: "(dummy.*|docker.*)"
# Regex to ignore IP addresses
ignore_ips: (127\.0\.0\..*)

@@ -110,15 +113,15 @@ network:
# blade_role: "Blade"
# server_role: "Server"
# tags: server, blade, ,just a comma,delimited,list
#
# custom_fields: field1=value1,field2=value2
#
# Can use this to set the tenant
#
#tenant:
# driver: "file:/tmp/tenant"
# regex: "(.*)"

## Enable virtual machine support

## Enable virtual machine support
# virtual:
# # not mandatory, can be guessed
# enabled: True

@@ -144,7 +147,7 @@ rack_location:
# driver: "file:/tmp/datacenter"
# regex: "(.*)"

# Enable local inventory reporting
# Enable local inventory reporting
inventory: true
```

@@ -159,6 +162,36 @@ The `get_blade_slot` method return the name of the `Device Bay`.

Certain vendors don't report the blade slot in `dmidecode`, so we can use the `slot_location` regex feature of the configuration file.

Some blade servers can be equipped with additional hardware using expansion blades, next to the processing blade, such as GPU expansions or drive bay expansions. By default, the hardware from the expansion is associated with the blade server itself, but it's possible to register the expansion as its own device using the `--expansion-as-device` command line parameter, or by setting `expansion_as_device` to `true` in the configuration file.

## Drives attributes processing

It is possible to process drives extended attributes such as the drive's physical or logical identifier, logical drive RAID type, size, consistency and so on.

Those attributes are set as `custom_fields` in Netbox, and need to be registered properly before being able to specify them during the inventory phase.

As the custom fields have to be created prior to being able to register the disks extended attributes, this feature is only activated using the `--process-virtual-drives` command line parameter, or by setting `process_virtual_drives` to `true` in the configuration file.

The custom fields to create as `DCIM > inventory item` `Text` are described below.

```
NAME            LABEL                      DESCRIPTION
mount_point     Mount point                Device mount point(s)
pd_identifier   Physical disk identifier   Physical disk identifier in the RAID controller
vd_array        Virtual drive array        Virtual drive array the disk is member of
vd_consistency  Virtual drive consistency  Virtual disk array consistency
vd_device       Virtual drive device       Virtual drive system device
vd_raid_type    Virtual drive RAID         Virtual drive array RAID type
vd_size         Virtual drive size         Virtual drive array size
```

In the current implementation, the disks attributes are not updated: if a disk with the correct serial number is found, it's sufficient to consider it as up to date.

To force the reprocessing of the disks extended attributes, the `--force-disk-refresh` command line option can be used: it removes all existing disks before re-populating them with the correct parsing. Unless this option is specified, the extended attributes won't be modified unless a disk is replaced.

It is possible to dump the physical/virtual disks map on the filesystem under the JSON notation to ease or automate disks management. The file path has to be provided using the `--dump-disks-map` command line parameter.


## Anycast IP

The default behavior of the agent is to assign an interface to an IP.

@@ -202,6 +235,7 @@ Tested on:
* HP ProLiant BL460c Gen8
* HP ProLiant BL460c Gen9
* HP ProLiant BL460c Gen10
* HP ProLiant BL460c Gen10 Graphics Exp and its expansion HP ProLiant BL460c Graphics Expansion Blade
* HP Moonshot 1500 Enclosure (your `DeviceType` should have slots batch create with `Bay c[1-45n1]`) with HP ProLiant m750, m710x, m510 Server Cartridge

### Pizzas

@@ -254,5 +288,5 @@ On a personal note, I use the docker image from [netbox-community/netbox-docker
# git clone https://github.com/netbox-community/netbox-docker
# cd netbox-docker
# docker-compose pull
# docker-compose up
# docker-compose up
```
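The custom-field table above lists what `--process-virtual-drives` expects to already exist in Netbox. As a minimal sketch (not part of this change set), the fields could be pre-created with pynetbox roughly as follows; the payload field names assume the NetBox 3.7 REST API (`content_types`), and the URL, token, and `create_disk_custom_fields` helper name are placeholders.

```python
# Hypothetical helper: pre-create the inventory-item custom fields used by
# --process-virtual-drives. Payload follows the NetBox 3.7 REST API.
import pynetbox

DISK_CUSTOM_FIELDS = {
    "mount_point": "Mount point",
    "pd_identifier": "Physical disk identifier",
    "vd_array": "Virtual drive array",
    "vd_consistency": "Virtual drive consistency",
    "vd_device": "Virtual drive device",
    "vd_raid_type": "Virtual drive RAID",
    "vd_size": "Virtual drive size",
}


def create_disk_custom_fields(url, token):
    nb = pynetbox.api(url, token=token)
    for name, label in DISK_CUSTOM_FIELDS.items():
        # Skip fields that already exist so the call stays idempotent.
        if nb.extras.custom_fields.get(name=name):
            continue
        nb.extras.custom_fields.create(
            name=name,
            label=label,
            type="text",
            content_types=["dcim.inventoryitem"],
        )


if __name__ == "__main__":
    create_disk_custom_fields("https://netbox.example.com", "supersecrettoken")
```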
default.nix (new file, 55 additions)
@@ -0,0 +1,55 @@
{
  sources ? import ./npins,
  pkgs ? import sources.nixpkgs { },
}:

let
  checks = (import sources.git-hooks).run {
    src = ./.;

    hooks =
      {
        commitizen.enable = true;
      }
      // (pkgs.lib.genAttrs
        [
          "black"
          "isort"
          "ruff"
        ]
        (hook: {
          enable = true;
          stages = [ "pre-push" ];
        })
      );
  };

  python3 = pkgs.python3.override {
    packageOverrides = self: _: {
      netifaces2 = self.callPackage ./nix/netifaces2.nix { };
    };
  };
in

{
  devShell = pkgs.mkShell {
    name = "netbox-agent.dev";

    packages = [
      (python3.withPackages (ps: [
        ps.pynetbox
        ps.netaddr
        ps.netifaces2
        ps.pyyaml
        ps.jsonargparse
        ps.python-slugify
        ps.packaging
        ps.distro
      ]))
    ] ++ checks.enabledPackages;

    shellHook = ''
      ${checks.shellHook}
    '';
  };
}
@@ -18,14 +18,15 @@ network:
# blade_role: "Blade"
# server_role: "Server"
# tags: server, blade, ,just a comma,delimited,list

# custom_fields: field1=value1,field2=value2
#
#
# Use this to set the tenant
#
#tenant:
# driver: "file:/tmp/tenant"
# regex: "(.*)"


datacenter_location:
  driver: "cmd:cat /etc/qualification | tr [A-Z] [a-z]"
  regex: "datacenter: (?P<datacenter>[A-Za-z0-9]+)"
@@ -1,6 +1,7 @@
from pkg_resources import DistributionNotFound, get_distribution
from importlib.metadata import PackageNotFoundError
from importlib.metadata import version as _get_version

try:
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    __version__ = _get_version(__name__)
except PackageNotFoundError:
    pass
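The hunk above interleaves the old `pkg_resources` lines with their replacements. Once the removed lines are dropped, the new module (presumably `netbox_agent/__init__.py`) reads roughly as below, a reconstruction assuming Python 3.8+ where `importlib.metadata` is in the standard library.

```python
# Reconstructed post-change __init__.py (sketch): resolve the installed
# package version via importlib.metadata instead of pkg_resources.
from importlib.metadata import PackageNotFoundError
from importlib.metadata import version as _get_version

try:
    __version__ = _get_version(__name__)
except PackageNotFoundError:
    # Not installed (e.g. running from a plain source checkout):
    # leave __version__ unset rather than failing at import time.
    pass
```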
@@ -1,5 +1,8 @@
from packaging import version

import netbox_agent.dmidecode as dmidecode
from netbox_agent.config import config
from netbox_agent.config import netbox_instance as nb
from netbox_agent.logging import logging # NOQA
from netbox_agent.vendors.dell import DellHost
from netbox_agent.vendors.generic import GenericHost

@@ -9,12 +12,12 @@ from netbox_agent.vendors.supermicro import SupermicroHost
from netbox_agent.virtualmachine import VirtualMachine, is_vm

MANUFACTURERS = {
    'Dell Inc.': DellHost,
    'HP': HPHost,
    'HPE': HPHost,
    'Supermicro': SupermicroHost,
    'Quanta Cloud Technology Inc.': QCTHost,
    'Generic': GenericHost,
    "Dell Inc.": DellHost,
    "HP": HPHost,
    "HPE": HPHost,
    "Supermicro": SupermicroHost,
    "Quanta Cloud Technology Inc.": QCTHost,
    "Generic": GenericHost,
}


@@ -23,26 +26,38 @@ def run(config):

    if config.virtual.enabled or is_vm(dmi):
        if not config.virtual.cluster_name:
            raise Exception('virtual.cluster_name parameter is mandatory because it\'s a VM')
            raise Exception(
                "virtual.cluster_name parameter is mandatory because it's a VM"
            )
        server = VirtualMachine(dmi=dmi)
    else:
        manufacturer = dmidecode.get_by_type(dmi, 'Chassis')[0].get('Manufacturer')
        manufacturer = dmidecode.get_by_type(dmi, "Chassis")[0].get("Manufacturer")
        try:
            server = MANUFACTURERS[manufacturer](dmi=dmi)
        except KeyError:
            server = GenericHost(dmi=dmi)

    if version.parse(nb.version) < version.parse("3.7"):
        print("netbox-agent is not compatible with Netbox prior to version 3.7")
        return False

    if (
        config.register
        or config.update_all
        or config.update_network
        or config.update_location
        or config.update_inventory
        or config.update_psu
    ):
        server.netbox_create_or_update(config)
    if config.debug:
        server.print_debug()
    if config.register or config.update_all or config.update_network or config.update_location or \
            config.update_inventory or config.update_psu:
        server.netbox_create_or_update(config)
    return True


def main():
    return run(config)
    return 0 if run(config) else 1


if __name__ == '__main__':
if __name__ == "__main__":
    main()
@@ -10,83 +10,171 @@ import urllib3
|
|||
def get_config():
|
||||
p = jsonargparse.ArgumentParser(
|
||||
default_config_files=[
|
||||
'/etc/netbox_agent.yaml',
|
||||
'~/.config/netbox_agent.yaml',
|
||||
'~/.netbox_agent.yaml',
|
||||
"/etc/netbox_agent.yaml",
|
||||
"~/.config/netbox_agent.yaml",
|
||||
"~/.netbox_agent.yaml",
|
||||
],
|
||||
prog='netbox_agent',
|
||||
prog="netbox_agent",
|
||||
description="Netbox agent to run on your infrastructure's servers",
|
||||
env_prefix='NETBOX_AGENT_',
|
||||
default_env=True
|
||||
env_prefix="NETBOX_AGENT_",
|
||||
default_env=True,
|
||||
)
|
||||
p.add_argument('-c', '--config', action=jsonargparse.ActionConfigFile)
|
||||
p.add_argument("-c", "--config", action=jsonargparse.ActionConfigFile)
|
||||
|
||||
p.add_argument('-r', '--register', action='store_true', help='Register server to Netbox')
|
||||
p.add_argument('-u', '--update-all', action='store_true', help='Update all infos in Netbox')
|
||||
p.add_argument('-d', '--debug', action='store_true', help='Print debug infos')
|
||||
p.add_argument('--update-network', action='store_true', help='Update network')
|
||||
p.add_argument('--update-inventory', action='store_true', help='Update inventory')
|
||||
p.add_argument('--update-location', action='store_true', help='Update location')
|
||||
p.add_argument('--update-psu', action='store_true', help='Update PSU')
|
||||
p.add_argument(
|
||||
"-r", "--register", action="store_true", help="Register server to Netbox"
|
||||
)
|
||||
p.add_argument(
|
||||
"-u", "--update-all", action="store_true", help="Update all infos in Netbox"
|
||||
)
|
||||
p.add_argument("-d", "--debug", action="store_true", help="Print debug infos")
|
||||
p.add_argument("--update-network", action="store_true", help="Update network")
|
||||
p.add_argument("--update-inventory", action="store_true", help="Update inventory")
|
||||
p.add_argument("--update-location", action="store_true", help="Update location")
|
||||
p.add_argument("--update-psu", action="store_true", help="Update PSU")
|
||||
p.add_argument(
|
||||
"--purge-old-devices",
|
||||
action="store_true",
|
||||
help="Purge existing (old ?) devices having same name but different serial",
|
||||
)
|
||||
p.add_argument(
|
||||
"--expansion-as-device",
|
||||
action="store_true",
|
||||
help="Manage blade expansions as external devices",
|
||||
)
|
||||
|
||||
p.add_argument('--log_level', default='debug')
|
||||
p.add_argument('--netbox.url', help='Netbox URL')
|
||||
p.add_argument('--netbox.token', help='Netbox API Token')
|
||||
p.add_argument('--netbox.ssl_verify', default=True, action='store_true',
|
||||
help='Disable SSL verification')
|
||||
p.add_argument('--virtual.enabled', action='store_true', help='Is a virtual machine or not')
|
||||
p.add_argument('--virtual.cluster_name', help='Cluster name of VM')
|
||||
p.add_argument('--hostname_cmd', default=None,
|
||||
help="Command to output hostname, used as Device's name in netbox")
|
||||
p.add_argument('--device.tags', default=r'',
|
||||
help='tags to use for a host')
|
||||
p.add_argument('--device.blade_role', default=r'Blade',
|
||||
help='role to use for a blade server')
|
||||
p.add_argument('--device.chassis_role', default=r'Server Chassis',
|
||||
help='role to use for a chassis')
|
||||
p.add_argument('--device.server_role', default=r'Server',
|
||||
help='role to use for a server')
|
||||
p.add_argument('--tenant.driver',
|
||||
help='tenant driver, ie cmd, file')
|
||||
p.add_argument('--tenant.driver_file',
|
||||
help='tenant driver custom driver file path')
|
||||
p.add_argument('--tenant.regex',
|
||||
help='tenant regex to extract Netbox tenant slug')
|
||||
p.add_argument('--datacenter_location.driver',
|
||||
help='Datacenter location driver, ie: cmd, file')
|
||||
p.add_argument('--datacenter_location.driver_file',
|
||||
help='Datacenter location custom driver file path')
|
||||
p.add_argument('--datacenter_location.regex',
|
||||
help='Datacenter location regex to extract Netbox DC slug')
|
||||
p.add_argument('--rack_location.driver', help='Rack location driver, ie: cmd, file')
|
||||
p.add_argument('--rack_location.driver_file', help='Rack location custom driver file path')
|
||||
p.add_argument('--rack_location.regex', help='Rack location regex to extract Rack name')
|
||||
p.add_argument('--slot_location.driver', help='Slot location driver, ie: cmd, file')
|
||||
p.add_argument('--slot_location.driver_file', help='Slot location custom driver file path')
|
||||
p.add_argument('--slot_location.regex', help='Slot location regex to extract slot name')
|
||||
p.add_argument('--network.ignore_interfaces', default=r'(dummy.*|docker.*)',
|
||||
help='Regex to ignore interfaces')
|
||||
p.add_argument('--network.ignore_ips', default=r'^(127\.0\.0\..*|fe80.*|::1.*)',
|
||||
help='Regex to ignore IPs')
|
||||
p.add_argument('--network.lldp', help='Enable auto-cabling feature through LLDP infos')
|
||||
p.add_argument('--inventory', action='store_true',
|
||||
help='Enable HW inventory (CPU, Memory, RAID Cards, Disks) feature')
|
||||
p.add_argument("--log_level", default="debug")
|
||||
p.add_argument("--netbox.ssl_ca_certs_file", help="SSL CA certificates file")
|
||||
p.add_argument("--netbox.url", help="Netbox URL")
|
||||
p.add_argument("--netbox.token", help="Netbox API Token")
|
||||
p.add_argument(
|
||||
"--netbox.ssl_verify",
|
||||
default=True,
|
||||
action="store_true",
|
||||
help="Disable SSL verification",
|
||||
)
|
||||
p.add_argument(
|
||||
"--virtual.enabled", action="store_true", help="Is a virtual machine or not"
|
||||
)
|
||||
p.add_argument("--virtual.cluster_name", help="Cluster name of VM")
|
||||
p.add_argument(
|
||||
"--hostname_cmd",
|
||||
default=None,
|
||||
help="Command to output hostname, used as Device's name in netbox",
|
||||
)
|
||||
p.add_argument(
|
||||
"--device.platform",
|
||||
default=None,
|
||||
help="Override device platform. Here we use OS distribution.",
|
||||
)
|
||||
p.add_argument("--device.tags", default=r"", help="tags to use for a host")
|
||||
p.add_argument(
|
||||
"--preserve-tags",
|
||||
action="store_true",
|
||||
help="Append new unique tags, preserve those already present",
|
||||
)
|
||||
p.add_argument(
|
||||
"--device.custom_fields",
|
||||
default=r"",
|
||||
help="custom_fields to use for a host, eg: field1=v1,field2=v2",
|
||||
)
|
||||
p.add_argument(
|
||||
"--device.blade_role", default=r"Blade", help="role to use for a blade server"
|
||||
)
|
||||
p.add_argument(
|
||||
"--device.chassis_role",
|
||||
default=r"Server Chassis",
|
||||
help="role to use for a chassis",
|
||||
)
|
||||
p.add_argument(
|
||||
"--device.server_role", default=r"Server", help="role to use for a server"
|
||||
)
|
||||
p.add_argument("--tenant.driver", help="tenant driver, ie cmd, file")
|
||||
p.add_argument("--tenant.driver_file", help="tenant driver custom driver file path")
|
||||
p.add_argument("--tenant.regex", help="tenant regex to extract Netbox tenant slug")
|
||||
p.add_argument(
|
||||
"--datacenter_location.driver", help="Datacenter location driver, ie: cmd, file"
|
||||
)
|
||||
p.add_argument(
|
||||
"--datacenter_location.driver_file",
|
||||
help="Datacenter location custom driver file path",
|
||||
)
|
||||
p.add_argument(
|
||||
"--datacenter_location.regex",
|
||||
help="Datacenter location regex to extract Netbox DC slug",
|
||||
)
|
||||
p.add_argument("--rack_location.driver", help="Rack location driver, ie: cmd, file")
|
||||
p.add_argument(
|
||||
"--rack_location.driver_file", help="Rack location custom driver file path"
|
||||
)
|
||||
p.add_argument(
|
||||
"--rack_location.regex", help="Rack location regex to extract Rack name"
|
||||
)
|
||||
p.add_argument("--slot_location.driver", help="Slot location driver, ie: cmd, file")
|
||||
p.add_argument(
|
||||
"--slot_location.driver_file", help="Slot location custom driver file path"
|
||||
)
|
||||
p.add_argument(
|
||||
"--slot_location.regex", help="Slot location regex to extract slot name"
|
||||
)
|
||||
p.add_argument(
|
||||
"--network.ignore_interfaces",
|
||||
default=r"(dummy.*|docker.*)",
|
||||
help="Regex to ignore interfaces",
|
||||
)
|
||||
p.add_argument(
|
||||
"--network.ignore_ips",
|
||||
default=r"^(127\.0\.0\..*|fe80.*|::1.*)",
|
||||
help="Regex to ignore IPs",
|
||||
)
|
||||
p.add_argument(
|
||||
"--network.ipmi", default=True, help="Enable gathering IPMI information"
|
||||
)
|
||||
p.add_argument(
|
||||
"--network.lldp", help="Enable auto-cabling feature through LLDP infos"
|
||||
)
|
||||
p.add_argument(
|
||||
"--inventory",
|
||||
action="store_true",
|
||||
help="Enable HW inventory (CPU, Memory, RAID Cards, Disks) feature",
|
||||
)
|
||||
p.add_argument(
|
||||
"--process-virtual-drives",
|
||||
action="store_true",
|
||||
help="Process virtual drives information from RAID "
|
||||
"controllers to fill disk custom_fields",
|
||||
)
|
||||
p.add_argument(
|
||||
"--force-disk-refresh",
|
||||
action="store_true",
|
||||
help="Forces disks detection reprocessing",
|
||||
)
|
||||
p.add_argument(
|
||||
"--dump-disks-map", help="File path to dump physical/virtual disks map"
|
||||
)
|
||||
|
||||
options = p.parse_args()
|
||||
return options
|
||||
|
||||
|
||||
config = get_config()
|
||||
|
||||
|
||||
def get_netbox_instance():
|
||||
config = get_config()
|
||||
if config.netbox.url is None or config.netbox.token is None:
|
||||
logging.error('Netbox URL and token are mandatory')
|
||||
logging.error("Netbox URL and token are mandatory")
|
||||
sys.exit(1)
|
||||
|
||||
nb = pynetbox.api(
|
||||
url=get_config().netbox.url,
|
||||
token=get_config().netbox.token,
|
||||
)
|
||||
if get_config().netbox.ssl_verify is False:
|
||||
ca_certs_file = config.netbox.ssl_ca_certs_file
|
||||
if ca_certs_file is not None:
|
||||
session = requests.Session()
|
||||
session.verify = ca_certs_file
|
||||
nb.http_session = session
|
||||
elif config.netbox.ssl_verify is False:
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
session = requests.Session()
|
||||
session.verify = False
|
||||
|
@@ -95,5 +183,4 @@ def get_netbox_instance():
|
|||
return nb
|
||||
|
||||
|
||||
config = get_config()
|
||||
netbox_instance = get_netbox_instance()
|
||||
|
|
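The configuration changes above add a `--netbox.ssl_ca_certs_file` option and route both it and `ssl_verify` through a `requests.Session` attached to pynetbox. Here is a condensed sketch of that logic; the URL, token, and the `build_api` helper name are placeholders rather than names from the code base.

```python
# Sketch of the TLS handling introduced above: point pynetbox at a custom CA
# bundle (netbox.ssl_ca_certs_file) or disable verification (ssl_verify: false).
import pynetbox
import requests
import urllib3


def build_api(url, token, ssl_ca_certs_file=None, ssl_verify=True):
    nb = pynetbox.api(url=url, token=token)
    session = requests.Session()
    if ssl_ca_certs_file is not None:
        # requests accepts a CA bundle path in Session.verify
        session.verify = ssl_ca_certs_file
    elif ssl_verify is False:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        session.verify = False
    nb.http_session = session
    return nb
```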
|
@@ -5,55 +5,57 @@ import sys
|
|||
|
||||
from netbox_agent.misc import is_tool
|
||||
|
||||
_handle_re = _re.compile('^Handle\\s+(.+),\\s+DMI\\s+type\\s+(\\d+),\\s+(\\d+)\\s+bytes$')
|
||||
_in_block_re = _re.compile('^\\t\\t(.+)$')
|
||||
_record_re = _re.compile('\\t(.+):\\s+(.+)$')
|
||||
_record2_re = _re.compile('\\t(.+):$')
|
||||
_handle_re = _re.compile(
|
||||
"^Handle\\s+(.+),\\s+DMI\\s+type\\s+(\\d+),\\s+(\\d+)\\s+bytes$"
|
||||
)
|
||||
_in_block_re = _re.compile("^\\t\\t(.+)$")
|
||||
_record_re = _re.compile("\\t(.+):\\s+(.+)$")
|
||||
_record2_re = _re.compile("\\t(.+):$")
|
||||
|
||||
_type2str = {
|
||||
0: 'BIOS',
|
||||
1: 'System',
|
||||
2: 'Baseboard',
|
||||
3: 'Chassis',
|
||||
4: 'Processor',
|
||||
5: 'Memory Controller',
|
||||
6: 'Memory Module',
|
||||
7: 'Cache',
|
||||
8: 'Port Connector',
|
||||
9: 'System Slots',
|
||||
10: ' On Board Devices',
|
||||
11: ' OEM Strings',
|
||||
12: ' System Configuration Options',
|
||||
13: ' BIOS Language',
|
||||
14: ' Group Associations',
|
||||
15: ' System Event Log',
|
||||
16: ' Physical Memory Array',
|
||||
17: ' Memory Device',
|
||||
18: ' 32-bit Memory Error',
|
||||
19: ' Memory Array Mapped Address',
|
||||
20: ' Memory Device Mapped Address',
|
||||
21: ' Built-in Pointing Device',
|
||||
22: ' Portable Battery',
|
||||
23: ' System Reset',
|
||||
24: ' Hardware Security',
|
||||
25: ' System Power Controls',
|
||||
26: ' Voltage Probe',
|
||||
27: ' Cooling Device',
|
||||
28: ' Temperature Probe',
|
||||
29: ' Electrical Current Probe',
|
||||
30: ' Out-of-band Remote Access',
|
||||
31: ' Boot Integrity Services',
|
||||
32: ' System Boot',
|
||||
33: ' 64-bit Memory Error',
|
||||
34: ' Management Device',
|
||||
35: ' Management Device Component',
|
||||
36: ' Management Device Threshold Data',
|
||||
37: ' Memory Channel',
|
||||
38: ' IPMI Device',
|
||||
39: ' Power Supply',
|
||||
40: ' Additional Information',
|
||||
41: ' Onboard Devices Extended Information',
|
||||
42: ' Management Controller Host Interface'
|
||||
0: "BIOS",
|
||||
1: "System",
|
||||
2: "Baseboard",
|
||||
3: "Chassis",
|
||||
4: "Processor",
|
||||
5: "Memory Controller",
|
||||
6: "Memory Module",
|
||||
7: "Cache",
|
||||
8: "Port Connector",
|
||||
9: "System Slots",
|
||||
10: " On Board Devices",
|
||||
11: " OEM Strings",
|
||||
12: " System Configuration Options",
|
||||
13: " BIOS Language",
|
||||
14: " Group Associations",
|
||||
15: " System Event Log",
|
||||
16: " Physical Memory Array",
|
||||
17: " Memory Device",
|
||||
18: " 32-bit Memory Error",
|
||||
19: " Memory Array Mapped Address",
|
||||
20: " Memory Device Mapped Address",
|
||||
21: " Built-in Pointing Device",
|
||||
22: " Portable Battery",
|
||||
23: " System Reset",
|
||||
24: " Hardware Security",
|
||||
25: " System Power Controls",
|
||||
26: " Voltage Probe",
|
||||
27: " Cooling Device",
|
||||
28: " Temperature Probe",
|
||||
29: " Electrical Current Probe",
|
||||
30: " Out-of-band Remote Access",
|
||||
31: " Boot Integrity Services",
|
||||
32: " System Boot",
|
||||
33: " 64-bit Memory Error",
|
||||
34: " Management Device",
|
||||
35: " Management Device Component",
|
||||
36: " Management Device Threshold Data",
|
||||
37: " Memory Channel",
|
||||
38: " IPMI Device",
|
||||
39: " Power Supply",
|
||||
40: " Additional Information",
|
||||
41: " Onboard Devices Extended Information",
|
||||
42: " Management Controller Host Interface",
|
||||
}
|
||||
_str2type = {}
|
||||
for type_id, type_str in _type2str.items():
|
||||
|
@@ -70,7 +72,7 @@ def parse(output=None):
|
|||
else:
|
||||
buffer = _execute_cmd()
|
||||
if isinstance(buffer, bytes):
|
||||
buffer = buffer.decode('utf-8')
|
||||
buffer = buffer.decode("utf-8")
|
||||
_data = _parse(buffer)
|
||||
return _data
|
||||
|
||||
|
@@ -129,24 +131,31 @@ def get_by_type(data, type_id):
|
|||
|
||||
result = []
|
||||
for entry in data.values():
|
||||
if entry['DMIType'] == type_id:
|
||||
if entry["DMIType"] == type_id:
|
||||
result.append(entry)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def _execute_cmd():
|
||||
if not is_tool('dmidecode'):
|
||||
logging.error('Dmidecode does not seem to be present on your system. Add it your path or '
|
||||
'check the compatibility of this project with your distro.')
|
||||
if not is_tool("dmidecode"):
|
||||
logging.error(
|
||||
"Dmidecode does not seem to be present on your system. Add it your path or "
|
||||
"check the compatibility of this project with your distro."
|
||||
)
|
||||
sys.exit(1)
|
||||
return _subprocess.check_output(['dmidecode', ], stderr=_subprocess.PIPE)
|
||||
return _subprocess.check_output(
|
||||
[
|
||||
"dmidecode",
|
||||
],
|
||||
stderr=_subprocess.PIPE,
|
||||
)
|
||||
|
||||
|
||||
def _parse(buffer):
|
||||
output_data = {}
|
||||
# Each record is separated by double newlines
|
||||
split_output = buffer.split('\n\n')
|
||||
split_output = buffer.split("\n\n")
|
||||
|
||||
for record in split_output:
|
||||
record_element = record.splitlines()
|
||||
|
@@ -164,21 +173,21 @@ def _parse(buffer):
|
|||
dmi_handle = handle_data[0]
|
||||
|
||||
output_data[dmi_handle] = {}
|
||||
output_data[dmi_handle]['DMIType'] = int(handle_data[1])
|
||||
output_data[dmi_handle]['DMISize'] = int(handle_data[2])
|
||||
output_data[dmi_handle]["DMIType"] = int(handle_data[1])
|
||||
output_data[dmi_handle]["DMISize"] = int(handle_data[2])
|
||||
|
||||
# Okay, we know 2nd line == name
|
||||
output_data[dmi_handle]['DMIName'] = record_element[1]
|
||||
output_data[dmi_handle]["DMIName"] = record_element[1]
|
||||
|
||||
in_block_elemet = ''
|
||||
in_block_list = ''
|
||||
in_block_elemet = ""
|
||||
in_block_list = ""
|
||||
|
||||
# Loop over the rest of the record, gathering values
|
||||
for i in range(2, len(record_element), 1):
|
||||
if i >= len(record_element):
|
||||
break
|
||||
# Check whether we are inside a \t\t block
|
||||
if in_block_elemet != '':
|
||||
if in_block_elemet != "":
|
||||
in_block_data = _in_block_re.findall(record_element[i])
|
||||
|
||||
if in_block_data:
|
||||
|
@@ -192,7 +201,7 @@ def _parse(buffer):
|
|||
else:
|
||||
# We are out of the \t\t block; reset it again, and let
|
||||
# the parsing continue
|
||||
in_block_elemet = ''
|
||||
in_block_elemet = ""
|
||||
|
||||
record_data = _record_re.findall(record_element[i])
|
||||
|
||||
|
@@ -208,7 +217,7 @@ def _parse(buffer):
|
|||
# This is an array of data - let the loop know we are inside
|
||||
# an array block
|
||||
in_block_elemet = record_data2[0]
|
||||
in_block_list = ''
|
||||
in_block_list = ""
|
||||
|
||||
continue
|
||||
|
||||
|
|
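To show how the reformatted dmidecode helpers are consumed, here is a small usage sketch mirroring the `run.py` hunk earlier in this diff; it assumes `get_by_type()` also accepts the type name as a string, which the `_str2type` mapping above suggests.

```python
# Usage sketch: parse dmidecode output and read the chassis manufacturer,
# the same lookup run.py performs before picking a vendor class.
import netbox_agent.dmidecode as dmidecode

dmi = dmidecode.parse()  # runs the dmidecode binary when no output is passed
chassis = dmidecode.get_by_type(dmi, "Chassis")
if chassis:
    print(chassis[0].get("Manufacturer"))
```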
@@ -2,7 +2,7 @@ import re


def get(value, regex):
    for line in open(value, 'r'):
    for line in open(value, "r"):
        r = re.search(regex, line)
        if r and len(r.groups()) > 0:
            return r.groups()[0]
@@ -6,16 +6,16 @@ from shutil import which
|
|||
|
||||
# mapping fields from ethtool output to simple names
|
||||
field_map = {
|
||||
'Supported ports': 'ports',
|
||||
'Supported link modes': 'sup_link_modes',
|
||||
'Supports auto-negotiation': 'sup_autoneg',
|
||||
'Advertised link modes': 'adv_link_modes',
|
||||
'Advertised auto-negotiation': 'adv_autoneg',
|
||||
'Speed': 'speed',
|
||||
'Duplex': 'duplex',
|
||||
'Port': 'port',
|
||||
'Auto-negotiation': 'autoneg',
|
||||
'Link detected': 'link',
|
||||
"Supported ports": "ports",
|
||||
"Supported link modes": "sup_link_modes",
|
||||
"Supports auto-negotiation": "sup_autoneg",
|
||||
"Advertised link modes": "adv_link_modes",
|
||||
"Advertised auto-negotiation": "adv_autoneg",
|
||||
"Speed": "speed",
|
||||
"Duplex": "duplex",
|
||||
"Port": "port",
|
||||
"Auto-negotiation": "autoneg",
|
||||
"Link detected": "link",
|
||||
}
|
||||
|
||||
|
||||
|
@@ -25,7 +25,7 @@ def merge_two_dicts(x, y):
|
|||
return z
|
||||
|
||||
|
||||
class Ethtool():
|
||||
class Ethtool:
|
||||
"""
|
||||
This class aims to parse ethtool output
|
||||
There is several bindings to have something proper, but it requires
|
||||
|
@@ -40,39 +40,40 @@ class Ethtool():
|
|||
parse ethtool output
|
||||
"""
|
||||
|
||||
output = subprocess.getoutput('ethtool {}'.format(self.interface))
|
||||
output = subprocess.getoutput("ethtool {}".format(self.interface))
|
||||
|
||||
fields = {}
|
||||
field = ''
|
||||
fields['speed'] = '-'
|
||||
fields['link'] = '-'
|
||||
fields['duplex'] = '-'
|
||||
for line in output.split('\n')[1:]:
|
||||
field = ""
|
||||
fields["speed"] = "-"
|
||||
fields["link"] = "-"
|
||||
fields["duplex"] = "-"
|
||||
for line in output.split("\n")[1:]:
|
||||
line = line.rstrip()
|
||||
r = line.find(':')
|
||||
r = line.find(":")
|
||||
if r > 0:
|
||||
field = line[:r].strip()
|
||||
if field not in field_map:
|
||||
continue
|
||||
field = field_map[field]
|
||||
output = line[r + 1:].strip()
|
||||
output = line[r + 1 :].strip()
|
||||
fields[field] = output
|
||||
else:
|
||||
if len(field) > 0 and \
|
||||
field in field_map:
|
||||
fields[field] += ' ' + line.strip()
|
||||
if len(field) > 0 and field in field_map:
|
||||
fields[field] += " " + line.strip()
|
||||
return fields
|
||||
|
||||
def _parse_ethtool_module_output(self):
|
||||
status, output = subprocess.getstatusoutput('ethtool -m {}'.format(self.interface))
|
||||
status, output = subprocess.getstatusoutput(
|
||||
"ethtool -m {}".format(self.interface)
|
||||
)
|
||||
if status == 0:
|
||||
r = re.search(r'Identifier.*\((\w+)\)', output)
|
||||
r = re.search(r"Identifier.*\((\w+)\)", output)
|
||||
if r and len(r.groups()) > 0:
|
||||
return {'form_factor': r.groups()[0]}
|
||||
return {"form_factor": r.groups()[0]}
|
||||
return {}
|
||||
|
||||
def parse(self):
|
||||
if which('ethtool') is None:
|
||||
if which("ethtool") is None:
|
||||
return None
|
||||
output = self._parse_ethtool_output()
|
||||
output.update(self._parse_ethtool_module_output())
|
||||
|
|
|
@@ -1,5 +1,8 @@
|
|||
import json
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
import pynetbox
|
||||
|
||||
|
@@ -12,16 +15,17 @@ from netbox_agent.raid.omreport import OmreportRaid
|
|||
from netbox_agent.raid.storcli import StorcliRaid
|
||||
|
||||
INVENTORY_TAG = {
|
||||
'cpu': {'name': 'hw:cpu', 'slug': 'hw-cpu'},
|
||||
'disk': {'name': 'hw:disk', 'slug': 'hw-disk'},
|
||||
'interface': {'name': 'hw:interface', 'slug': 'hw-interface'},
|
||||
'memory': {'name': 'hw:memory', 'slug': 'hw-memory'},
|
||||
'motherboard': {'name': 'hw:motherboard', 'slug': 'hw-motherboard'},
|
||||
'raid_card': {'name': 'hw:raid_card', 'slug': 'hw-raid-card'},
|
||||
"cpu": {"name": "hw:cpu", "slug": "hw-cpu"},
|
||||
"gpu": {"name": "hw:gpu", "slug": "hw-gpu"},
|
||||
"disk": {"name": "hw:disk", "slug": "hw-disk"},
|
||||
"interface": {"name": "hw:interface", "slug": "hw-interface"},
|
||||
"memory": {"name": "hw:memory", "slug": "hw-memory"},
|
||||
"motherboard": {"name": "hw:motherboard", "slug": "hw-motherboard"},
|
||||
"raid_card": {"name": "hw:raid_card", "slug": "hw-raid-card"},
|
||||
}
|
||||
|
||||
|
||||
class Inventory():
|
||||
class Inventory:
|
||||
"""
|
||||
Better Inventory items coming, see:
|
||||
- https://github.com/netbox-community/netbox/issues/3087
|
||||
|
@@ -32,6 +36,7 @@ class Inventory():
|
|||
* cpu
|
||||
* raid cards
|
||||
* disks
|
||||
* gpus
|
||||
|
||||
methods that:
|
||||
* get local item
|
||||
|
@@ -44,10 +49,11 @@ class Inventory():
|
|||
- no scan of NVMe devices
|
||||
"""
|
||||
|
||||
def __init__(self, server):
|
||||
def __init__(self, server, update_expansion=False):
|
||||
self.create_netbox_tags()
|
||||
self.server = server
|
||||
netbox_server = self.server.get_netbox_server()
|
||||
self.update_expansion = update_expansion
|
||||
netbox_server = self.server.get_netbox_server(update_expansion)
|
||||
|
||||
self.device_id = netbox_server.id if netbox_server else None
|
||||
self.raid = None
|
||||
|
@@ -56,16 +62,17 @@ class Inventory():
|
|||
self.lshw = LSHW()
|
||||
|
||||
def create_netbox_tags(self):
|
||||
ret = []
|
||||
for key, tag in INVENTORY_TAG.items():
|
||||
nb_tag = nb.extras.tags.get(
|
||||
name=tag['name']
|
||||
)
|
||||
nb_tag = nb.extras.tags.get(name=tag["name"])
|
||||
if not nb_tag:
|
||||
nb_tag = nb.extras.tags.create(
|
||||
name=tag['name'],
|
||||
slug=tag['slug'],
|
||||
comments=tag['name'],
|
||||
name=tag["name"],
|
||||
slug=tag["slug"],
|
||||
comments=tag["name"],
|
||||
)
|
||||
ret.append(nb_tag)
|
||||
return ret
|
||||
|
||||
def find_or_create_manufacturer(self, name):
|
||||
if name is None:
|
||||
|
@@ -75,29 +82,28 @@ class Inventory():
|
|||
name=name,
|
||||
)
|
||||
if not manufacturer:
|
||||
logging.info('Creating missing manufacturer {name}'.format(name=name))
|
||||
logging.info("Creating missing manufacturer {name}".format(name=name))
|
||||
manufacturer = nb.dcim.manufacturers.create(
|
||||
name=name,
|
||||
slug=re.sub('[^A-Za-z0-9]+', '-', name).lower(),
|
||||
slug=re.sub("[^A-Za-z0-9]+", "-", name).lower(),
|
||||
)
|
||||
|
||||
logging.info('Creating missing manufacturer {name}'.format(name=name))
|
||||
logging.info("Creating missing manufacturer {name}".format(name=name))
|
||||
|
||||
return manufacturer
|
||||
|
||||
def get_netbox_inventory(self, device_id, tag):
|
||||
try:
|
||||
items = nb.dcim.inventory_items.filter(
|
||||
device_id=device_id,
|
||||
tag=tag
|
||||
)
|
||||
items = nb.dcim.inventory_items.filter(device_id=device_id, tag=tag)
|
||||
except pynetbox.core.query.RequestError:
|
||||
logging.info('Tag {tag} is missing, returning empty array.'.format(tag=tag))
|
||||
logging.info("Tag {tag} is missing, returning empty array.".format(tag=tag))
|
||||
items = []
|
||||
|
||||
return items
|
||||
return list(items)
|
||||
|
||||
def create_netbox_inventory_item(self, device_id, tags, vendor, name, serial, description):
|
||||
def create_netbox_inventory_item(
|
||||
self, device_id, tags, vendor, name, serial, description
|
||||
):
|
||||
manufacturer = self.find_or_create_manufacturer(vendor)
|
||||
|
||||
_ = nb.dcim.inventory_items.create(
|
||||
|
@@ -105,26 +111,25 @@ class Inventory():
|
|||
manufacturer=manufacturer.id,
|
||||
discovered=True,
|
||||
tags=tags,
|
||||
name='{}'.format(name),
|
||||
serial='{}'.format(serial),
|
||||
description=description
|
||||
name="{}".format(name),
|
||||
serial="{}".format(serial),
|
||||
description=description,
|
||||
)
|
||||
|
||||
logging.info('Creating inventory item {} {}/{} {} '.format(
|
||||
vendor,
|
||||
name,
|
||||
serial,
|
||||
description)
|
||||
logging.info(
|
||||
"Creating inventory item {} {}/{} {} ".format(
|
||||
vendor, name, serial, description
|
||||
)
|
||||
)
|
||||
|
||||
def get_hw_motherboards(self):
|
||||
motherboards = []
|
||||
|
||||
m = {}
|
||||
m['serial'] = self.lshw.motherboard_serial
|
||||
m['vendor'] = self.lshw.vendor
|
||||
m['name'] = '{} {}'.format(self.lshw.vendor, self.lshw.motherboard)
|
||||
m['description'] = '{} Motherboard'.format(self.lshw.motherboard)
|
||||
m["serial"] = self.lshw.motherboard_serial
|
||||
m["vendor"] = self.lshw.vendor
|
||||
m["name"] = "{} {}".format(self.lshw.vendor, self.lshw.motherboard)
|
||||
m["description"] = "{} Motherboard".format(self.lshw.motherboard)
|
||||
|
||||
motherboards.append(m)
|
||||
|
||||
|
@@ -134,27 +139,29 @@ class Inventory():
|
|||
|
||||
motherboards = self.get_hw_motherboards()
|
||||
nb_motherboards = self.get_netbox_inventory(
|
||||
device_id=self.device_id,
|
||||
tag=INVENTORY_TAG['motherboard']['slug'])
|
||||
device_id=self.device_id, tag=INVENTORY_TAG["motherboard"]["slug"]
|
||||
)
|
||||
|
||||
for nb_motherboard in nb_motherboards:
|
||||
if nb_motherboard.serial not in [x['serial'] for x in motherboards]:
|
||||
logging.info('Deleting unknown motherboard {motherboard}/{serial}'.format(
|
||||
motherboard=self.lshw.motherboard,
|
||||
serial=nb_motherboard.serial,
|
||||
))
|
||||
if nb_motherboard.serial not in [x["serial"] for x in motherboards]:
|
||||
logging.info(
|
||||
"Deleting unknown motherboard {motherboard}/{serial}".format(
|
||||
motherboard=self.lshw.motherboard,
|
||||
serial=nb_motherboard.serial,
|
||||
)
|
||||
)
|
||||
nb_motherboard.delete()
|
||||
|
||||
# create interfaces that are not in netbox
|
||||
for motherboard in motherboards:
|
||||
if motherboard.get('serial') not in [x.serial for x in nb_motherboards]:
|
||||
if motherboard.get("serial") not in [x.serial for x in nb_motherboards]:
|
||||
self.create_netbox_inventory_item(
|
||||
device_id=self.device_id,
|
||||
tags=[INVENTORY_TAG['motherboard']['name']],
|
||||
vendor='{}'.format(motherboard.get('vendor', 'N/A')),
|
||||
serial='{}'.format(motherboard.get('serial', 'No SN')),
|
||||
name='{}'.format(motherboard.get('name')),
|
||||
description='{}'.format(motherboard.get('description'))
|
||||
tags=[{"name": INVENTORY_TAG["motherboard"]["name"]}],
|
||||
vendor="{}".format(motherboard.get("vendor", "N/A")),
|
||||
serial="{}".format(motherboard.get("serial", "No SN")),
|
||||
name="{}".format(motherboard.get("name")),
|
||||
description="{}".format(motherboard.get("description")),
|
||||
)
|
||||
|
||||
def create_netbox_interface(self, iface):
|
||||
|
@@ -163,84 +170,93 @@ class Inventory():
|
|||
device=self.device_id,
|
||||
manufacturer=manufacturer.id,
|
||||
discovered=True,
|
||||
tags=[INVENTORY_TAG['interface']['name']],
|
||||
name="{}".format(iface['product']),
|
||||
serial='{}'.format(iface['serial']),
|
||||
description='{} {}'.format(iface['description'], iface['name'])
|
||||
tags=[{"name": INVENTORY_TAG["interface"]["name"]}],
|
||||
name="{}".format(iface["product"]),
|
||||
serial="{}".format(iface["serial"]),
|
||||
description="{} {}".format(iface["description"], iface["name"]),
|
||||
)
|
||||
|
||||
def do_netbox_interfaces(self):
|
||||
nb_interfaces = self.get_netbox_inventory(
|
||||
device_id=self.device_id,
|
||||
tag=INVENTORY_TAG['interface']['slug'])
|
||||
device_id=self.device_id, tag=INVENTORY_TAG["interface"]["slug"]
|
||||
)
|
||||
interfaces = self.lshw.interfaces
|
||||
|
||||
# delete interfaces that are in netbox but not locally
|
||||
# use the serial_number has the comparison element
|
||||
for nb_interface in nb_interfaces:
|
||||
if nb_interface.serial not in [x['serial'] for x in interfaces]:
|
||||
logging.info('Deleting unknown interface {serial}'.format(
|
||||
serial=nb_interface.serial,
|
||||
))
|
||||
if nb_interface.serial not in [x["serial"] for x in interfaces]:
|
||||
logging.info(
|
||||
"Deleting unknown interface {serial}".format(
|
||||
serial=nb_interface.serial,
|
||||
)
|
||||
)
|
||||
nb_interface.delete()
|
||||
|
||||
# create interfaces that are not in netbox
|
||||
for iface in interfaces:
|
||||
if iface.get('serial') not in [x.serial for x in nb_interfaces]:
|
||||
if iface.get("serial") not in [x.serial for x in nb_interfaces]:
|
||||
self.create_netbox_interface(iface)
|
||||
|
||||
def create_netbox_cpus(self):
|
||||
for cpu in self.lshw.get_hw_linux('cpu'):
|
||||
for cpu in self.lshw.get_hw_linux("cpu"):
|
||||
manufacturer = self.find_or_create_manufacturer(cpu["vendor"])
|
||||
_ = nb.dcim.inventory_items.create(
|
||||
device=self.device_id,
|
||||
manufacturer=manufacturer.id,
|
||||
discovered=True,
|
||||
tags=[INVENTORY_TAG['cpu']['name']],
|
||||
name=cpu['product'],
|
||||
description='CPU {}'.format(cpu['location']),
|
||||
tags=[{"name": INVENTORY_TAG["cpu"]["name"]}],
|
||||
name=cpu["product"],
|
||||
description="CPU {}".format(cpu["location"]),
|
||||
# asset_tag=cpu['location']
|
||||
)
|
||||
|
||||
logging.info('Creating CPU model {}'.format(cpu['product']))
|
||||
logging.info("Creating CPU model {}".format(cpu["product"]))
|
||||
|
||||
def do_netbox_cpus(self):
|
||||
cpus = self.lshw.get_hw_linux('cpu')
|
||||
cpus = self.lshw.get_hw_linux("cpu")
|
||||
nb_cpus = self.get_netbox_inventory(
|
||||
device_id=self.device_id,
|
||||
tag=INVENTORY_TAG['cpu']['slug'],
|
||||
tag=INVENTORY_TAG["cpu"]["slug"],
|
||||
)
|
||||
|
||||
if not len(nb_cpus) or \
|
||||
len(nb_cpus) and len(cpus) != len(nb_cpus):
|
||||
if not len(nb_cpus) or len(nb_cpus) and len(cpus) != len(nb_cpus):
|
||||
for x in nb_cpus:
|
||||
x.delete()
|
||||
|
||||
self.create_netbox_cpus()
|
||||
|
||||
def get_raid_cards(self):
|
||||
def get_raid_cards(self, filter_cards=False):
|
||||
raid_class = None
|
||||
if self.server.manufacturer == 'Dell':
|
||||
if is_tool('omreport'):
|
||||
if self.server.manufacturer in ("Dell", "Huawei"):
|
||||
if is_tool("omreport"):
|
||||
raid_class = OmreportRaid
|
||||
if is_tool('storcli'):
|
||||
if is_tool("storcli"):
|
||||
raid_class = StorcliRaid
|
||||
elif self.server.manufacturer == 'HP':
|
||||
if is_tool('ssacli'):
|
||||
elif self.server.manufacturer in ("HP", "HPE"):
|
||||
if is_tool("ssacli"):
|
||||
raid_class = HPRaid
|
||||
|
||||
if not raid_class:
|
||||
return []
|
||||
|
||||
self.raid = raid_class()
|
||||
controllers = self.raid.get_controllers()
|
||||
if len(self.raid.get_controllers()):
|
||||
return controllers
|
||||
|
||||
if (
|
||||
filter_cards
|
||||
and config.expansion_as_device
|
||||
and self.server.own_expansion_slot()
|
||||
):
|
||||
return [
|
||||
c
|
||||
for c in self.raid.get_controllers()
|
||||
if c.is_external() is self.update_expansion
|
||||
]
|
||||
else:
|
||||
return self.raid.get_controllers()
|
||||
|
||||
def create_netbox_raid_card(self, raid_card):
|
||||
manufacturer = self.find_or_create_manufacturer(
|
||||
raid_card.get_manufacturer()
|
||||
)
|
||||
manufacturer = self.find_or_create_manufacturer(raid_card.get_manufacturer())
|
||||
|
||||
name = raid_card.get_product_name()
|
||||
serial = raid_card.get_serial_number()
|
||||
|
@@ -248,15 +264,17 @@ class Inventory():
|
|||
device=self.device_id,
|
||||
discovered=True,
|
||||
manufacturer=manufacturer.id if manufacturer else None,
|
||||
tags=[INVENTORY_TAG['raid_card']['name']],
|
||||
name='{}'.format(name),
|
||||
serial='{}'.format(serial),
|
||||
description='RAID Card',
|
||||
tags=[{"name": INVENTORY_TAG["raid_card"]["name"]}],
|
||||
name="{}".format(name),
|
||||
serial="{}".format(serial),
|
||||
description="RAID Card",
|
||||
)
|
||||
logging.info(
|
||||
"Creating RAID Card {name} (SN: {serial})".format(
|
||||
name=name,
|
||||
serial=serial,
|
||||
)
|
||||
)
|
||||
logging.info('Creating RAID Card {name} (SN: {serial})'.format(
|
||||
name=name,
|
||||
serial=serial,
|
||||
))
|
||||
return nb_raid_card
|
||||
|
||||
def do_netbox_raid_cards(self):
|
||||
|
@@ -271,18 +289,19 @@ class Inventory():
|
|||
"""
|
||||
|
||||
nb_raid_cards = self.get_netbox_inventory(
|
||||
device_id=self.device_id,
|
||||
tag=[INVENTORY_TAG['raid_card']['slug']]
|
||||
device_id=self.device_id, tag=[INVENTORY_TAG["raid_card"]["slug"]]
|
||||
)
|
||||
raid_cards = self.get_raid_cards()
|
||||
raid_cards = self.get_raid_cards(filter_cards=True)
|
||||
|
||||
# delete cards that are in netbox but not locally
|
||||
# use the serial_number has the comparison element
|
||||
for nb_raid_card in nb_raid_cards:
|
||||
if nb_raid_card.serial not in [x.get_serial_number() for x in raid_cards]:
|
||||
logging.info('Deleting unknown locally RAID Card {serial}'.format(
|
||||
serial=nb_raid_card.serial,
|
||||
))
|
||||
logging.info(
|
||||
"Deleting unknown locally RAID Card {serial}".format(
|
||||
serial=nb_raid_card.serial,
|
||||
)
|
||||
)
|
||||
nb_raid_card.delete()
|
||||
|
||||
# create card that are not in netbox
|
||||
|
@@ -290,56 +309,70 @@ class Inventory():
|
|||
if raid_card.get_serial_number() not in [x.serial for x in nb_raid_cards]:
|
||||
self.create_netbox_raid_card(raid_card)
|
||||
|
||||
def is_virtual_disk(self, disk):
|
||||
logicalname = disk.get('logicalname')
|
||||
description = disk.get('description')
|
||||
size = disk.get('size')
|
||||
product = disk.get('product')
|
||||
|
||||
def is_virtual_disk(self, disk, raid_devices):
|
||||
disk_type = disk.get("type")
|
||||
logicalname = disk.get("logicalname")
|
||||
description = disk.get("description")
|
||||
size = disk.get("size")
|
||||
product = disk.get("product")
|
||||
if (
|
||||
logicalname in raid_devices
|
||||
or disk_type is None
|
||||
or product is None
|
||||
or description is None
|
||||
):
|
||||
return True
|
||||
non_raid_disks = [
|
||||
'MR9361-8i',
|
||||
"MR9361-8i",
|
||||
]
|
||||
|
||||
if size is None and logicalname is None or \
|
||||
'virtual' in product.lower() or 'logical' in product.lower() or \
|
||||
product in non_raid_disks or \
|
||||
description == 'SCSI Enclosure' or \
|
||||
'volume' in description.lower():
|
||||
if (
|
||||
logicalname in raid_devices
|
||||
or product in non_raid_disks
|
||||
or "virtual" in product.lower()
|
||||
or "logical" in product.lower()
|
||||
or "volume" in description.lower()
|
||||
or "dvd-ram" in description.lower()
|
||||
or description == "SCSI Enclosure"
|
||||
or (size is None and logicalname is None)
|
||||
):
|
||||
return True
|
||||
return False
|
||||
|
||||
def get_hw_disks(self):
|
||||
disks = []
|
||||
|
||||
for raid_card in self.get_raid_cards(filter_cards=True):
|
||||
disks.extend(raid_card.get_physical_disks())
|
||||
|
||||
raid_devices = [
|
||||
d.get("custom_fields", {}).get("vd_device")
|
||||
for d in disks
|
||||
if d.get("custom_fields", {}).get("vd_device")
|
||||
]
|
||||
|
||||
for disk in self.lshw.get_hw_linux("storage"):
|
||||
if self.is_virtual_disk(disk):
|
||||
if self.is_virtual_disk(disk, raid_devices):
|
||||
continue
|
||||
|
||||
logicalname = disk.get('logicalname')
|
||||
description = disk.get('description')
|
||||
size = disk.get('size', 0)
|
||||
product = disk.get('product')
|
||||
serial = disk.get('serial')
|
||||
|
||||
d = {}
|
||||
d["name"] = ""
|
||||
d['Size'] = '{} GB'.format(int(size / 1024 / 1024 / 1024))
|
||||
d['logicalname'] = logicalname
|
||||
d['description'] = description
|
||||
d['SN'] = serial
|
||||
d['Model'] = product
|
||||
if disk.get('vendor'):
|
||||
d['Vendor'] = disk['vendor']
|
||||
size = round(int(disk.get("size", 0)) / 1073741824, 1)
|
||||
d = {
|
||||
"name": "",
|
||||
"Size": "{} GB".format(size),
|
||||
"logicalname": disk.get("logicalname"),
|
||||
"description": disk.get("description"),
|
||||
"SN": disk.get("serial"),
|
||||
"Model": disk.get("product"),
|
||||
"Type": disk.get("type"),
|
||||
}
|
||||
if disk.get("vendor"):
|
||||
d["Vendor"] = disk["vendor"]
|
||||
else:
|
||||
d['Vendor'] = get_vendor(disk['product'])
|
||||
d["Vendor"] = get_vendor(disk["product"])
|
||||
disks.append(d)
|
||||
|
||||
for raid_card in self.get_raid_cards():
|
||||
disks += raid_card.get_physical_disks()
|
||||
|
||||
# remove duplicate serials
|
||||
seen = set()
|
||||
uniq = [x for x in disks if x['SN'] not in seen and not seen.add(x['SN'])]
|
||||
uniq = [x for x in disks if x["SN"] not in seen and not seen.add(x["SN"])]
|
||||
return uniq
|
||||
|
||||
def create_netbox_disk(self, disk):
|
||||
|
@@ -347,104 +380,184 @@ class Inventory():
|
|||
if "Vendor" in disk:
|
||||
manufacturer = self.find_or_create_manufacturer(disk["Vendor"])
|
||||
|
||||
logicalname = disk.get('logicalname')
|
||||
desc = disk.get('description')
|
||||
# nonraid disk
|
||||
if logicalname and desc:
|
||||
if type(logicalname) is list:
|
||||
logicalname = logicalname[0]
|
||||
name = '{} - {} ({})'.format(
|
||||
desc,
|
||||
logicalname,
|
||||
disk.get('Size', 0))
|
||||
description = 'Device {}'.format(disk.get('logicalname', 'Unknown'))
|
||||
else:
|
||||
name = '{} ({})'.format(disk['Model'], disk['Size'])
|
||||
description = '{}'.format(disk['Type'])
|
||||
name = "{} ({})".format(disk["Model"], disk["Size"])
|
||||
description = disk["Type"]
|
||||
sn = disk.get("SN", "unknown")
|
||||
|
||||
_ = nb.dcim.inventory_items.create(
|
||||
device=self.device_id,
|
||||
discovered=True,
|
||||
tags=[INVENTORY_TAG['disk']['name']],
|
||||
name=name,
|
||||
serial=disk['SN'],
|
||||
part_id=disk['Model'],
|
||||
description=description,
|
||||
manufacturer=manufacturer.id if manufacturer else None
|
||||
parms = {
|
||||
"device": self.device_id,
|
||||
"discovered": True,
|
||||
"tags": [{"name": INVENTORY_TAG["disk"]["name"]}],
|
||||
"name": name,
|
||||
"serial": sn,
|
||||
"part_id": disk["Model"],
|
||||
"description": description,
|
||||
"manufacturer": getattr(manufacturer, "id", None),
|
||||
}
|
||||
if config.process_virtual_drives:
|
||||
parms["custom_fields"] = disk.get("custom_fields", {})
|
||||
|
||||
_ = nb.dcim.inventory_items.create(**parms)
|
||||
|
||||
logging.info(
|
||||
"Creating Disk {model} {serial}".format(
|
||||
model=disk["Model"],
|
||||
serial=sn,
|
||||
)
|
||||
)
|
||||
|
||||
logging.info('Creating Disk {model} {serial}'.format(
|
||||
model=disk['Model'],
|
||||
serial=disk['SN'],
|
||||
))
|
||||
def dump_disks_map(self, disks):
|
||||
disk_map = [d["custom_fields"] for d in disks if "custom_fields" in d]
|
||||
if config.dump_disks_map == "-":
|
||||
f = sys.stdout
|
||||
else:
|
||||
f = open(config.dump_disks_map, "w")
|
||||
f.write(json.dumps(disk_map, separators=(",", ":"), indent=4, sort_keys=True))
|
||||
if config.dump_disks_map != "-":
|
||||
f.close()
|
||||
|
||||
def do_netbox_disks(self):
|
||||
nb_disks = self.get_netbox_inventory(
|
||||
device_id=self.device_id,
|
||||
tag=INVENTORY_TAG['disk']['slug'])
|
||||
device_id=self.device_id, tag=INVENTORY_TAG["disk"]["slug"]
|
||||
)
|
||||
disks = self.get_hw_disks()
|
||||
if config.dump_disks_map:
|
||||
try:
|
||||
self.dump_disks_map(disks)
|
||||
except Exception as e:
|
||||
logging.error("Failed to dump disks map: {}".format(e))
|
||||
logging.debug(traceback.format_exc())
|
||||
disk_serials = [d["SN"] for d in disks if "SN" in d]
|
||||
|
||||
# delete disks that are in netbox but not locally
|
||||
# use the serial_number has the comparison element
|
||||
for nb_disk in nb_disks:
|
||||
if nb_disk.serial not in [x['SN'] for x in disks if x.get('SN')]:
|
||||
logging.info('Deleting unknown locally Disk {serial}'.format(
|
||||
serial=nb_disk.serial,
|
||||
))
|
||||
if nb_disk.serial not in disk_serials or config.force_disk_refresh:
|
||||
logging.info(
|
||||
"Deleting unknown locally Disk {serial}".format(
|
||||
serial=nb_disk.serial,
|
||||
)
|
||||
)
|
||||
nb_disk.delete()
|
||||
|
||||
if config.force_disk_refresh:
|
||||
nb_disks = self.get_netbox_inventory(
|
||||
device_id=self.device_id, tag=INVENTORY_TAG["disk"]["slug"]
|
||||
)
|
||||
|
||||
# create disks that are not in netbox
|
||||
for disk in disks:
|
||||
if disk.get('SN') not in [x.serial for x in nb_disks]:
|
||||
if disk.get("SN") not in [d.serial for d in nb_disks]:
|
||||
self.create_netbox_disk(disk)
|
||||
|
||||
def create_netbox_memory(self, memory):
|
||||
manufacturer = self.find_or_create_manufacturer(memory['vendor'])
|
||||
name = 'Slot {} ({}GB)'.format(memory['slot'], memory['size'])
|
||||
manufacturer = self.find_or_create_manufacturer(memory["vendor"])
|
||||
name = "Slot {} ({}GB)".format(memory["slot"], memory["size"])
|
||||
nb_memory = nb.dcim.inventory_items.create(
|
||||
device=self.device_id,
|
||||
discovered=True,
|
||||
manufacturer=manufacturer.id,
|
||||
tags=[INVENTORY_TAG['memory']['name']],
|
||||
tags=[{"name": INVENTORY_TAG["memory"]["name"]}],
|
||||
name=name,
|
||||
part_id=memory['product'],
|
||||
serial=memory['serial'],
|
||||
description=memory['description'],
|
||||
part_id=memory["product"],
|
||||
serial=memory["serial"],
|
||||
description=memory["description"],
|
||||
)
|
||||
|
||||
logging.info('Creating Memory {location} {type} {size}GB'.format(
|
||||
location=memory['slot'],
|
||||
type=memory['product'],
|
||||
size=memory['size'],
|
||||
))
|
||||
logging.info(
|
||||
"Creating Memory {location} {type} {size}GB".format(
|
||||
location=memory["slot"],
|
||||
type=memory["product"],
|
||||
size=memory["size"],
|
||||
)
|
||||
)
|
||||
|
||||
return nb_memory
|
||||
|
||||
    def do_netbox_memories(self):
        memories = self.lshw.memories
        nb_memories = self.get_netbox_inventory(
            device_id=self.device_id, tag=INVENTORY_TAG["memory"]["slug"]
        )

        for nb_memory in nb_memories:
            if nb_memory.serial not in [x["serial"] for x in memories]:
                logging.info(
                    "Deleting unknown locally Memory {serial}".format(
                        serial=nb_memory.serial,
                    )
                )
                nb_memory.delete()

        for memory in memories:
            if memory.get("serial") not in [x.serial for x in nb_memories]:
                self.create_netbox_memory(memory)

    def create_netbox_gpus(self, gpus):
        for gpu in gpus:
            if "product" in gpu and len(gpu["product"]) > 50:
                gpu["product"] = gpu["product"][:48] + ".."

            manufacturer = self.find_or_create_manufacturer(gpu["vendor"])
            _ = nb.dcim.inventory_items.create(
                device=self.device_id,
                manufacturer=manufacturer.id,
                discovered=True,
                tags=[{"name": INVENTORY_TAG["gpu"]["name"]}],
                name=gpu["product"],
                description=gpu["description"],
            )

            logging.info("Creating GPU model {}".format(gpu["product"]))

    def is_external_gpu(self, gpu):
        is_3d_gpu = gpu["description"].startswith("3D")
        return (
            self.server.is_blade()
            and self.server.own_gpu_expansion_slot()
            and is_3d_gpu
        )

    def do_netbox_gpus(self):
        gpus = []
        gpu_models = {}
        for gpu in self.lshw.get_hw_linux("gpu"):
            # Filters GPU if an expansion bay is detected:
            # The internal (VGA) GPU only goes into the blade inventory,
            # the external (3D) GPU goes into the expansion blade.
            if (
                config.expansion_as_device
                and self.update_expansion ^ self.is_external_gpu(gpu)
            ):
                continue
            gpus.append(gpu)
            gpu_models.setdefault(gpu["product"], 0)
            gpu_models[gpu["product"]] += 1

        nb_gpus = self.get_netbox_inventory(
            device_id=self.device_id,
            tag=INVENTORY_TAG["gpu"]["slug"],
        )
        nb_gpu_models = {}
        for gpu in nb_gpus:
            nb_gpu_models.setdefault(str(gpu), 0)
            nb_gpu_models[str(gpu)] += 1
        up_to_date = set(gpu_models) == set(nb_gpu_models)
        if not gpus or not up_to_date:
            for x in nb_gpus:
                x.delete()
        if gpus and not up_to_date:
            self.create_netbox_gpus(gpus)
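    # Note on the expansion filter in do_netbox_gpus(): when expansion_as_device is
    # set, a GPU is kept only when the current pass matches its location, i.e. when
    # `self.update_expansion ^ self.is_external_gpu(gpu)` is False:
    #   update_expansion=False -> keep the internal (VGA) GPU on the blade itself
    #   update_expansion=True  -> keep the external (3D) GPU on the expansion device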
||||
def create_or_update(self):
|
||||
if config.inventory is None or config.update_inventory is None:
|
||||
return False
|
||||
self.do_netbox_cpus()
|
||||
self.do_netbox_memories()
|
||||
self.do_netbox_raid_cards()
|
||||
if self.update_expansion is False:
|
||||
self.do_netbox_cpus()
|
||||
self.do_netbox_memories()
|
||||
self.do_netbox_interfaces()
|
||||
self.do_netbox_motherboard()
|
||||
self.do_netbox_gpus()
|
||||
self.do_netbox_disks()
|
||||
self.do_netbox_interfaces()
|
||||
self.do_netbox_motherboard()
|
||||
self.do_netbox_raid_cards()
|
||||
return True
|
||||
|
|
|
@ -4,7 +4,7 @@ import subprocess
|
|||
from netaddr import IPNetwork
|
||||
|
||||
|
||||
class IPMI():
|
||||
class IPMI:
|
||||
"""
|
||||
Parse IPMI output
|
||||
ie:
|
||||
|
@ -37,9 +37,9 @@ class IPMI():
|
|||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.ret, self.output = subprocess.getstatusoutput('ipmitool lan print')
|
||||
self.ret, self.output = subprocess.getstatusoutput("ipmitool lan print")
|
||||
if self.ret != 0:
|
||||
logging.error('Cannot get ipmi info: {}'.format(self.output))
|
||||
logging.error("Cannot get ipmi info: {}".format(self.output))
|
||||
|
||||
def parse(self):
|
||||
_ipmi = {}
|
||||
|
@ -47,22 +47,31 @@ class IPMI():
|
|||
return _ipmi
|
||||
|
||||
for line in self.output.splitlines():
|
||||
key = line.split(':')[0].strip()
|
||||
if key not in ['802.1q VLAN ID', 'IP Address', 'Subnet Mask', 'MAC Address']:
|
||||
key = line.split(":")[0].strip()
|
||||
if key not in [
|
||||
"802.1q VLAN ID",
|
||||
"IP Address",
|
||||
"Subnet Mask",
|
||||
"MAC Address",
|
||||
]:
|
||||
continue
|
||||
value = ':'.join(line.split(':')[1:]).strip()
|
||||
value = ":".join(line.split(":")[1:]).strip()
|
||||
_ipmi[key] = value
|
||||
|
||||
ret = {}
ret["name"] = "IPMI"
ret["mtu"] = 1500
ret["bonding"] = False
ret["mac"] = _ipmi["MAC Address"]
ret["vlan"] = (
    int(_ipmi["802.1q VLAN ID"])
    if _ipmi["802.1q VLAN ID"] != "Disabled"
    else None
)
ip = _ipmi["IP Address"]
netmask = _ipmi["Subnet Mask"]
address = str(IPNetwork("{}/{}".format(ip, netmask)))

ret["ip"] = [address]
ret["ipmi"] = True
return ret
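# Illustrative `ipmitool lan print` lines (made-up values) and the dict parse()
# would build from them:
#
#   IP Address              : 10.0.0.10
#   Subnet Mask             : 255.255.255.0
#   MAC Address             : aa:bb:cc:dd:ee:ff
#   802.1q VLAN ID          : Disabled
#
#   -> {"name": "IPMI", "mtu": 1500, "bonding": False,
#       "mac": "aa:bb:cc:dd:ee:ff", "vlan": None,
#       "ip": ["10.0.0.10/24"], "ipmi": True}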
@ -1,12 +1,17 @@
|
|||
import logging
|
||||
import subprocess
|
||||
|
||||
from netbox_agent.misc import is_tool
|
||||
|
||||
class LLDP():
|
||||
|
||||
class LLDP:
|
||||
def __init__(self, output=None):
|
||||
if not is_tool("lldpctl"):
|
||||
logging.debug("lldpd package seems to be missing or daemon not running.")
|
||||
if output:
|
||||
self.output = output
|
||||
else:
|
||||
self.output = subprocess.getoutput('lldpctl -f keyvalue')
|
||||
self.output = subprocess.getoutput("lldpctl -f keyvalue")
|
||||
self.data = self.parse()
|
||||
|
||||
def parse(self):
|
||||
|
@ -14,7 +19,7 @@ class LLDP():
|
|||
vlans = {}
|
||||
vid = None
|
||||
for entry in self.output.splitlines():
|
||||
if '=' not in entry:
|
||||
if "=" not in entry:
|
||||
continue
|
||||
path, value = entry.strip().split("=", 1)
|
||||
split_path = path.split(".")
|
||||
|
@ -26,38 +31,41 @@ class LLDP():
|
|||
vlans[interface] = {}
|
||||
|
||||
for path_component in path_components:
|
||||
current_dict[path_component] = current_dict.get(path_component, {})
|
||||
current_dict = current_dict[path_component]
|
||||
if 'vlan-id' in path:
|
||||
if not isinstance(current_dict.get(path_component), dict):
|
||||
current_dict[path_component] = {}
|
||||
current_dict = current_dict.get(path_component)
|
||||
if "vlan-id" in path:
|
||||
vid = value
|
||||
vlans[interface][value] = vlans[interface].get(vid, {})
|
||||
elif path.endswith('vlan'):
|
||||
vid = value.replace('vlan-', '')
|
||||
elif path.endswith("vlan"):
|
||||
vid = value.replace("vlan-", "")
|
||||
vlans[interface][vid] = vlans[interface].get(vid, {})
|
||||
elif 'pvid' in path:
|
||||
vlans[interface][vid]['pvid'] = True
|
||||
if 'vlan' not in path:
|
||||
elif "pvid" in path:
|
||||
vlans[interface][vid]["pvid"] = True
|
||||
if "vlan" not in path:
|
||||
current_dict[final] = value
|
||||
for interface, vlan in vlans.items():
|
||||
output_dict['lldp'][interface]['vlan'] = vlan
|
||||
output_dict["lldp"][interface]["vlan"] = vlan
|
||||
if not output_dict:
|
||||
logging.debug("No LLDP output, please check your network config.")
|
||||
return output_dict
|
||||
|
||||
def get_switch_ip(self, interface):
|
||||
# lldp.eth0.chassis.mgmt-ip=100.66.7.222
|
||||
if self.data['lldp'].get(interface) is None:
|
||||
if self.data["lldp"].get(interface) is None:
|
||||
return None
|
||||
return self.data['lldp'][interface]['chassis']['mgmt-ip']
|
||||
return self.data["lldp"][interface]["chassis"].get("mgmt-ip")
|
||||
|
||||
def get_switch_port(self, interface):
|
||||
# lldp.eth0.port.descr=GigabitEthernet1/0/1
|
||||
if self.data['lldp'].get(interface) is None:
|
||||
if self.data["lldp"].get(interface) is None:
|
||||
return None
|
||||
if self.data['lldp'][interface]['port'].get('ifname'):
|
||||
return self.data['lldp'][interface]['port']['ifname']
|
||||
return self.data['lldp'][interface]['port']['descr']
|
||||
if self.data["lldp"][interface]["port"].get("ifname"):
|
||||
return self.data["lldp"][interface]["port"]["ifname"]
|
||||
return self.data["lldp"][interface]["port"]["descr"]
|
||||
|
||||
def get_switch_vlan(self, interface):
|
||||
# lldp.eth0.vlan.vlan-id=296
|
||||
if self.data['lldp'].get(interface) is None:
|
||||
if self.data["lldp"].get(interface) is None:
|
||||
return None
|
||||
return self.data['lldp'][interface]['vlan']
|
||||
return self.data["lldp"][interface]["vlan"]
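# A rough usage sketch, assuming lldpctl keyvalue output like the samples shown in
# the comments above (interface name and values are illustrative):
#
#     lldp = LLDP("lldp.eth0.chassis.mgmt-ip=100.66.7.222\n"
#                 "lldp.eth0.port.ifname=Ethernet1/1\n"
#                 "lldp.eth0.vlan.vlan-id=296")
#     lldp.get_switch_ip("eth0")    # -> "100.66.7.222"
#     lldp.get_switch_port("eth0")  # -> "Ethernet1/1"
#     lldp.get_switch_vlan("eth0")  # -> {"296": {}} (pvid flag added when reported)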
|
||||
|
|
|
@ -4,7 +4,7 @@ import importlib.machinery
|
|||
from netbox_agent.config import config
|
||||
|
||||
|
||||
class LocationBase():
|
||||
class LocationBase:
|
||||
"""
|
||||
This class is used to guess the location in order to push the information
|
||||
in Netbox for a `Device`
|
||||
|
@ -27,15 +27,19 @@ class LocationBase():
|
|||
if self.driver_file:
|
||||
try:
|
||||
# FIXME: Works with Python 3.3+, support older version?
|
||||
loader = importlib.machinery.SourceFileLoader('driver_file', self.driver_file)
|
||||
loader = importlib.machinery.SourceFileLoader(
|
||||
"driver_file", self.driver_file
|
||||
)
|
||||
self.driver = loader.load_module()
|
||||
except ImportError:
|
||||
raise ImportError("Couldn't import {} as a module".format(self.driver_file))
|
||||
raise ImportError(
|
||||
"Couldn't import {} as a module".format(self.driver_file)
|
||||
)
|
||||
else:
|
||||
if self.driver:
|
||||
try:
|
||||
self.driver = importlib.import_module(
|
||||
'netbox_agent.drivers.{}'.format(self.driver)
|
||||
"netbox_agent.drivers.{}".format(self.driver)
|
||||
)
|
||||
except ImportError:
|
||||
raise ImportError("Driver {} doesn't exists".format(self.driver))
|
||||
|
@ -43,19 +47,23 @@ class LocationBase():
|
|||
def get(self):
|
||||
if self.driver is None:
|
||||
return None
|
||||
if not hasattr(self.driver, 'get'):
|
||||
if not hasattr(self.driver, "get"):
|
||||
raise Exception(
|
||||
"Your driver {} doesn't have a get() function, please fix it".format(self.driver)
|
||||
"Your driver {} doesn't have a get() function, please fix it".format(
|
||||
self.driver
|
||||
)
|
||||
)
|
||||
return getattr(self.driver, 'get')(self.driver_value, self.regex)
|
||||
return getattr(self.driver, "get")(self.driver_value, self.regex)
|
||||
|
||||
|
||||
class Tenant(LocationBase):
|
||||
def __init__(self):
|
||||
driver = config.tenant.driver.split(':')[0] if \
|
||||
config.tenant.driver else None
|
||||
driver_value = ':'.join(config.tenant.driver.split(':')[1:]) if \
|
||||
config.tenant.driver else None
|
||||
driver = config.tenant.driver.split(":")[0] if config.tenant.driver else None
|
||||
driver_value = (
|
||||
":".join(config.tenant.driver.split(":")[1:])
|
||||
if config.tenant.driver
|
||||
else None
|
||||
)
|
||||
driver_file = config.tenant.driver_file
|
||||
regex = config.tenant.regex
|
||||
super().__init__(driver, driver_value, driver_file, regex)
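# The driver setting is a "<driver>:<value>" string: everything before the first
# colon selects the driver, the rest is handed to it as the value. For example
# (made-up value), "cmd:cat /etc/datacenter" yields
#   driver       = "cmd"
#   driver_value = "cat /etc/datacenter"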
|
||||
|
@ -63,10 +71,16 @@ class Tenant(LocationBase):
|
|||
|
||||
class Datacenter(LocationBase):
|
||||
def __init__(self):
|
||||
driver = config.datacenter_location.driver.split(':')[0] if \
|
||||
config.datacenter_location.driver else None
|
||||
driver_value = ':'.join(config.datacenter_location.driver.split(':')[1:]) if \
|
||||
config.datacenter_location.driver else None
|
||||
driver = (
|
||||
config.datacenter_location.driver.split(":")[0]
|
||||
if config.datacenter_location.driver
|
||||
else None
|
||||
)
|
||||
driver_value = (
|
||||
":".join(config.datacenter_location.driver.split(":")[1:])
|
||||
if config.datacenter_location.driver
|
||||
else None
|
||||
)
|
||||
driver_file = config.datacenter_location.driver_file
|
||||
regex = config.datacenter_location.regex
|
||||
super().__init__(driver, driver_value, driver_file, regex)
|
||||
|
@ -74,10 +88,16 @@ class Datacenter(LocationBase):
|
|||
|
||||
class Rack(LocationBase):
|
||||
def __init__(self):
|
||||
driver = config.rack_location.driver.split(':')[0] if \
|
||||
config.rack_location.driver else None
|
||||
driver_value = ':'.join(config.rack_location.driver.split(':')[1:]) if \
|
||||
config.rack_location.driver else None
|
||||
driver = (
|
||||
config.rack_location.driver.split(":")[0]
|
||||
if config.rack_location.driver
|
||||
else None
|
||||
)
|
||||
driver_value = (
|
||||
":".join(config.rack_location.driver.split(":")[1:])
|
||||
if config.rack_location.driver
|
||||
else None
|
||||
)
|
||||
driver_file = config.rack_location.driver_file
|
||||
regex = config.rack_location.regex
|
||||
super().__init__(driver, driver_value, driver_file, regex)
|
||||
|
@ -85,10 +105,16 @@ class Rack(LocationBase):
|
|||
|
||||
class Slot(LocationBase):
|
||||
def __init__(self):
|
||||
driver = config.slot_location.driver.split(':')[0] if \
|
||||
config.slot_location.driver else None
|
||||
driver_value = ':'.join(config.slot_location.driver.split(':')[1:]) if \
|
||||
config.slot_location.driver else None
|
||||
driver = (
|
||||
config.slot_location.driver.split(":")[0]
|
||||
if config.slot_location.driver
|
||||
else None
|
||||
)
|
||||
driver_value = (
|
||||
":".join(config.slot_location.driver.split(":")[1:])
|
||||
if config.slot_location.driver
|
||||
else None
|
||||
)
|
||||
driver_file = config.slot_location.driver_file
|
||||
regex = config.slot_location.regex
|
||||
super().__init__(driver, driver_value, driver_file, regex)
|
||||
|
|
|
@ -3,7 +3,7 @@ import logging
|
|||
from netbox_agent.config import config
|
||||
|
||||
logger = logging.getLogger()
|
||||
if config.log_level.lower() == 'debug':
|
||||
if config.log_level.lower() == "debug":
|
||||
logger.setLevel(logging.DEBUG)
|
||||
else:
|
||||
logger.setLevel(logging.INFO)
|
||||
|
|
|
@ -6,22 +6,27 @@ import sys
|
|||
from netbox_agent.misc import is_tool
|
||||
|
||||
|
||||
class LSHW():
|
||||
class LSHW:
|
||||
def __init__(self):
|
||||
if not is_tool('lshw'):
|
||||
logging.error('lshw does not seem to be installed')
|
||||
if not is_tool("lshw"):
|
||||
logging.error("lshw does not seem to be installed")
|
||||
sys.exit(1)
|
||||
|
||||
data = subprocess.getoutput(
|
||||
'lshw -quiet -json'
|
||||
)
|
||||
self.hw_info = json.loads(data)
|
||||
data = subprocess.getoutput("lshw -quiet -json")
|
||||
json_data = json.loads(data)
|
||||
# Starting from version 02.18, `lshw -json` wraps its result in a list
|
||||
# rather than returning directly a dictionary
|
||||
if isinstance(json_data, list):
|
||||
self.hw_info = json_data[0]
|
||||
else:
|
||||
self.hw_info = json_data
|
||||
self.info = {}
|
||||
self.memories = []
|
||||
self.interfaces = []
|
||||
self.cpus = []
|
||||
self.power = []
|
||||
self.disks = []
|
||||
self.gpus = []
|
||||
self.vendor = self.hw_info["vendor"]
|
||||
self.product = self.hw_info["product"]
|
||||
self.chassis_serial = self.hw_info["serial"]
|
||||
|
@ -53,64 +58,93 @@ class LSHW():
|
|||
def get_hw_linux(self, hwclass):
|
||||
if hwclass == "cpu":
|
||||
return self.cpus
|
||||
if hwclass == "gpu":
|
||||
return self.gpus
|
||||
if hwclass == "network":
|
||||
return self.interfaces
|
||||
if hwclass == 'storage':
|
||||
if hwclass == "storage":
|
||||
return self.disks
|
||||
if hwclass == 'memory':
|
||||
if hwclass == "memory":
|
||||
return self.memories
|
||||
|
||||
def find_network(self, obj):
|
||||
d = {}
|
||||
d["name"] = obj["logicalname"]
|
||||
d["macaddress"] = obj["serial"]
|
||||
d["serial"] = obj["serial"]
|
||||
d["product"] = obj["product"]
|
||||
d["vendor"] = obj["vendor"]
|
||||
d["description"] = obj["description"]
|
||||
# Some interfaces do not have device (logical) name (eth0, for
|
||||
# instance), such as not connected network mezzanine cards in blade
|
||||
# servers. In such situations, the card will be named `unknown[0-9]`.
|
||||
unkn_intfs = []
|
||||
for i in self.interfaces:
|
||||
# newer versions of lshw can return a list of names, see issue #227
|
||||
if not isinstance(i["name"], list):
|
||||
if i["name"].startswith("unknown"):
|
||||
unkn_intfs.append(i)
|
||||
else:
|
||||
for j in i["name"]:
|
||||
if j.startswith("unknown"):
|
||||
unkn_intfs.append(j)
|
||||
|
||||
self.interfaces.append(d)
|
||||
unkn_name = "unknown{}".format(len(unkn_intfs))
|
||||
self.interfaces.append(
|
||||
{
|
||||
"name": obj.get("logicalname", unkn_name),
|
||||
"macaddress": obj.get("serial", ""),
|
||||
"serial": obj.get("serial", ""),
|
||||
"product": obj.get("product", "Unknown NIC"),
|
||||
"vendor": obj.get("vendor", "Unknown"),
|
||||
"description": obj.get("description", ""),
|
||||
}
|
||||
)
|
||||
|
||||
def find_storage(self, obj):
|
||||
if "children" in obj:
|
||||
for device in obj["children"]:
|
||||
d = {}
|
||||
d["logicalname"] = device.get("logicalname")
|
||||
d["product"] = device.get("product")
|
||||
d["serial"] = device.get("serial")
|
||||
d["version"] = device.get("version")
|
||||
d["size"] = device.get("size")
|
||||
d["description"] = device.get("description")
|
||||
|
||||
self.disks.append(d)
|
||||
|
||||
self.disks.append(
|
||||
{
|
||||
"logicalname": device.get("logicalname"),
|
||||
"product": device.get("product"),
|
||||
"serial": device.get("serial"),
|
||||
"version": device.get("version"),
|
||||
"size": device.get("size"),
|
||||
"description": device.get("description"),
|
||||
"type": device.get("description"),
|
||||
}
|
||||
)
|
||||
elif "nvme" in obj["configuration"]["driver"]:
|
||||
nvme = json.loads(
|
||||
subprocess.check_output(
|
||||
["nvme", '-list', '-o', 'json'],
|
||||
encoding='utf8')
|
||||
)
|
||||
|
||||
for device in nvme["Devices"]:
|
||||
d = {}
|
||||
d['logicalname'] = device["DevicePath"]
|
||||
d['product'] = device["ModelNumber"]
|
||||
d['serial'] = device["SerialNumber"]
|
||||
d["version"] = device["Firmware"]
|
||||
d['size'] = device["UsedSize"]
|
||||
d['description'] = "NVME Disk"
|
||||
|
||||
self.disks.append(d)
|
||||
if not is_tool("nvme"):
|
||||
logging.error("nvme-cli >= 1.0 does not seem to be installed")
|
||||
return
|
||||
try:
|
||||
nvme = json.loads(
|
||||
subprocess.check_output(
|
||||
["nvme", "-list", "-o", "json"], encoding="utf8"
|
||||
)
|
||||
)
|
||||
for device in nvme["Devices"]:
|
||||
d = {
|
||||
"logicalname": device["DevicePath"],
|
||||
"product": device["ModelNumber"],
|
||||
"serial": device["SerialNumber"],
|
||||
"version": device["Firmware"],
|
||||
"description": "NVME",
|
||||
"type": "NVME",
|
||||
}
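# Depending on the nvme-cli release, the used capacity is reported as either
# "UsedSize" or "UsedBytes", so both keys are checked below.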
|
||||
if "UsedSize" in device:
|
||||
d["size"] = device["UsedSize"]
|
||||
if "UsedBytes" in device:
|
||||
d["size"] = device["UsedBytes"]
|
||||
self.disks.append(d)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def find_cpus(self, obj):
|
||||
if "product" in obj:
|
||||
c = {}
|
||||
c["product"] = obj["product"]
|
||||
c["vendor"] = obj["vendor"]
|
||||
c["description"] = obj["description"]
|
||||
c["location"] = obj["slot"]
|
||||
|
||||
self.cpus.append(c)
|
||||
self.cpus.append(
|
||||
{
|
||||
"product": obj.get("product", "Unknown CPU"),
|
||||
"vendor": obj.get("vendor", "Unknown vendor"),
|
||||
"description": obj.get("description", ""),
|
||||
"location": obj.get("slot", ""),
|
||||
}
|
||||
)
|
||||
|
||||
def find_memories(self, obj):
|
||||
if "children" not in obj:
|
||||
|
@ -121,16 +155,26 @@ class LSHW():
|
|||
if "empty" in dimm["description"]:
|
||||
continue
|
||||
|
||||
d = {}
|
||||
d["slot"] = dimm.get("slot")
|
||||
d["description"] = dimm.get("description")
|
||||
d["id"] = dimm.get("id")
|
||||
d["serial"] = dimm.get("serial", 'N/A')
|
||||
d["vendor"] = dimm.get("vendor", 'N/A')
|
||||
d["product"] = dimm.get("product", 'N/A')
|
||||
d["size"] = dimm.get("size", 0) / 2 ** 20 / 1024
|
||||
self.memories.append(
|
||||
{
|
||||
"slot": dimm.get("slot"),
|
||||
"description": dimm.get("description"),
|
||||
"id": dimm.get("id"),
|
||||
"serial": dimm.get("serial", "N/A"),
|
||||
"vendor": dimm.get("vendor", "N/A"),
|
||||
"product": dimm.get("product", "N/A"),
|
||||
"size": dimm.get("size", 0) / 2**20 / 1024,
|
||||
}
|
||||
)
|
||||
|
||||
self.memories.append(d)
|
||||
def find_gpus(self, obj):
|
||||
if "product" in obj:
|
||||
infos = {
|
||||
"product": obj.get("product", "Unknown GPU"),
|
||||
"vendor": obj.get("vendor", "Unknown"),
|
||||
"description": obj.get("description", ""),
|
||||
}
|
||||
self.gpus.append(infos)
|
||||
|
||||
def walk_bridge(self, obj):
|
||||
if "children" not in obj:
|
||||
|
@ -139,6 +183,8 @@ class LSHW():
|
|||
for bus in obj["children"]:
|
||||
if bus["class"] == "storage":
|
||||
self.find_storage(bus)
|
||||
if bus["class"] == "display":
|
||||
self.find_gpus(bus)
|
||||
|
||||
if "children" in bus:
|
||||
for b in bus["children"]:
|
||||
|
@ -146,6 +192,8 @@ class LSHW():
|
|||
self.find_storage(b)
|
||||
if b["class"] == "network":
|
||||
self.find_network(b)
|
||||
if b["class"] == "display":
|
||||
self.find_gpus(b)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -1,51 +1,78 @@
|
|||
import re
|
||||
import socket
|
||||
import subprocess
|
||||
from shutil import which
|
||||
|
||||
from slugify import slugify
|
||||
|
||||
from netbox_agent.config import netbox_instance as nb
|
||||
|
||||
|
||||
def is_tool(name):
|
||||
'''Check whether `name` is on PATH and marked as executable.'''
|
||||
"""Check whether `name` is on PATH and marked as executable."""
|
||||
return which(name) is not None
|
||||
|
||||
|
||||
def get_device_role(role):
|
||||
device_role = nb.dcim.device_roles.get(
|
||||
name=role
|
||||
)
|
||||
device_role = nb.dcim.device_roles.get(name=role)
|
||||
if device_role is None:
|
||||
raise Exception('DeviceRole "{}" does not exist, please create it'.format(role))
|
||||
return device_role
|
||||
|
||||
|
||||
def get_device_type(type):
|
||||
device_type = nb.dcim.device_types.get(
|
||||
model=type
|
||||
)
|
||||
device_type = nb.dcim.device_types.get(model=type)
|
||||
if device_type is None:
|
||||
raise Exception('DeviceType "{}" does not exist, please create it'.format(type))
|
||||
return device_type
|
||||
|
||||
|
||||
def get_device_platform(device_platform):
|
||||
if device_platform is None:
|
||||
try:
|
||||
# Python 3.8+ moved linux_distribution() to distro
|
||||
try:
|
||||
import distro
|
||||
|
||||
linux_distribution = " ".join(distro.linux_distribution())
|
||||
except ImportError:
|
||||
import platform
|
||||
|
||||
linux_distribution = " ".join(platform.linux_distribution())
|
||||
|
||||
if not linux_distribution:
|
||||
return None
|
||||
except (ModuleNotFoundError, NameError, AttributeError):
|
||||
return None
|
||||
else:
|
||||
linux_distribution = device_platform
|
||||
|
||||
device_platform = nb.dcim.platforms.get(name=linux_distribution)
|
||||
if device_platform is None:
|
||||
device_platform = nb.dcim.platforms.create(
|
||||
name=linux_distribution, slug=slugify(linux_distribution)
|
||||
)
|
||||
return device_platform
|
||||
|
||||
|
||||
def get_vendor(name):
|
||||
vendors = {
|
||||
'PERC': 'Dell',
|
||||
'SANDISK': 'SanDisk',
|
||||
'DELL': 'Dell',
|
||||
'ST': 'Seagate',
|
||||
'CRUCIAL': 'Crucial',
|
||||
'MICRON': 'Micron',
|
||||
'INTEL': 'Intel',
|
||||
'SAMSUNG': 'Samsung',
|
||||
'EH0': 'HP',
|
||||
'HGST': 'HGST',
|
||||
'HUH': 'HGST',
|
||||
'MB': 'Toshiba',
|
||||
'MC': 'Toshiba',
|
||||
'MD': 'Toshiba',
|
||||
'MG': 'Toshiba',
|
||||
'WD': 'WDC'
|
||||
"PERC": "Dell",
|
||||
"SANDISK": "SanDisk",
|
||||
"DELL": "Dell",
|
||||
"ST": "Seagate",
|
||||
"CRUCIAL": "Crucial",
|
||||
"MICRON": "Micron",
|
||||
"INTEL": "Intel",
|
||||
"SAMSUNG": "Samsung",
|
||||
"EH0": "HP",
|
||||
"HGST": "HGST",
|
||||
"HUH": "HGST",
|
||||
"MB": "Toshiba",
|
||||
"MC": "Toshiba",
|
||||
"MD": "Toshiba",
|
||||
"MG": "Toshiba",
|
||||
"WD": "WDC",
|
||||
}
|
||||
for key, value in vendors.items():
|
||||
if name.upper().startswith(key):
|
||||
|
@ -55,17 +82,32 @@ def get_vendor(name):
|
|||
|
||||
def get_hostname(config):
|
||||
if config.hostname_cmd is None:
|
||||
return '{}'.format(socket.gethostname())
|
||||
return "{}".format(socket.gethostname())
|
||||
return subprocess.getoutput(config.hostname_cmd)
|
||||
|
||||
|
||||
def create_netbox_tags(tags):
|
||||
ret = []
|
||||
for tag in tags:
|
||||
nb_tag = nb.extras.tags.get(
|
||||
name=tag
|
||||
)
|
||||
nb_tag = nb.extras.tags.get(name=tag)
|
||||
if not nb_tag:
|
||||
nb_tag = nb.extras.tags.create(
|
||||
name=tag,
|
||||
slug=tag,
|
||||
slug=slugify(tag),
|
||||
)
|
||||
ret.append(nb_tag)
|
||||
return ret
|
||||
|
||||
|
||||
def get_mount_points():
|
||||
mount_points = {}
|
||||
output = subprocess.getoutput("mount")
|
||||
for r in output.split("\n"):
|
||||
if not r.startswith("/dev/"):
|
||||
continue
|
||||
mount_info = r.split()
|
||||
device = mount_info[0]
|
||||
device = re.sub(r"\d+$", "", device)
|
||||
mp = mount_info[2]
|
||||
mount_points.setdefault(device, []).append(mp)
|
||||
return mount_points
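# Illustrative `mount` output line (made-up) and the resulting map: trailing
# partition digits are stripped from the device name before grouping mount points.
#
#     /dev/sda1 on /boot type ext4 (rw,relatime)
#
#     -> {"/dev/sda": ["/boot"]}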
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
import logging
|
||||
import os
|
||||
import re
|
||||
from itertools import chain
|
||||
from itertools import chain, islice
|
||||
|
||||
import netifaces
|
||||
from netaddr import IPAddress
|
||||
|
@ -26,36 +26,46 @@ class Network(object):
|
|||
self.dcim_choices = {}
|
||||
dcim_c = nb.dcim.interfaces.choices()
|
||||
for _choice_type in dcim_c:
|
||||
key = 'interface:{}'.format(_choice_type)
|
||||
key = "interface:{}".format(_choice_type)
|
||||
self.dcim_choices[key] = {}
|
||||
for choice in dcim_c[_choice_type]:
|
||||
self.dcim_choices[key][choice['display_name']] = choice['value']
|
||||
self.dcim_choices[key][choice["display_name"]] = choice["value"]
|
||||
|
||||
self.ipam_choices = {}
|
||||
ipam_c = nb.ipam.ip_addresses.choices()
|
||||
for _choice_type in ipam_c:
|
||||
key = 'ip-address:{}'.format(_choice_type)
|
||||
key = "ip-address:{}".format(_choice_type)
|
||||
self.ipam_choices[key] = {}
|
||||
for choice in ipam_c[_choice_type]:
|
||||
self.ipam_choices[key][choice['display_name']] = choice['value']
|
||||
self.ipam_choices[key][choice["display_name"]] = choice["value"]
|
||||
|
||||
def get_network_type():
|
||||
return NotImplementedError
|
||||
|
||||
def scan(self):
|
||||
nics = []
|
||||
for interface in os.listdir('/sys/class/net/'):
|
||||
for interface in os.listdir("/sys/class/net/"):
|
||||
# ignore if it's not a link (ie: bonding_masters etc)
|
||||
if not os.path.islink('/sys/class/net/{}'.format(interface)):
|
||||
if not os.path.islink("/sys/class/net/{}".format(interface)):
|
||||
continue
|
||||
|
||||
if config.network.ignore_interfaces and \
|
||||
re.match(config.network.ignore_interfaces, interface):
|
||||
logging.debug('Ignore interface {interface}'.format(interface=interface))
|
||||
if config.network.ignore_interfaces and re.match(
|
||||
config.network.ignore_interfaces, interface
|
||||
):
|
||||
logging.debug(
|
||||
"Ignore interface {interface}".format(interface=interface)
|
||||
)
|
||||
continue
|
||||
|
||||
ip_addr = netifaces.ifaddresses(interface).get(netifaces.AF_INET, [])
|
||||
ip6_addr = netifaces.ifaddresses(interface).get(netifaces.AF_INET6, [])
|
||||
if config.network.ignore_ips:
|
||||
for i, ip in enumerate(ip_addr):
|
||||
if re.match(config.network.ignore_ips, ip["addr"]):
|
||||
ip_addr.pop(i)
|
||||
for i, ip in enumerate(ip6_addr):
|
||||
if re.match(config.network.ignore_ips, ip["addr"]):
|
||||
ip6_addr.pop(i)
|
||||
|
||||
# netifaces returns a ipv6 netmask that netaddr does not understand.
|
||||
# this strips the netmask down to the correct format for netaddr,
|
||||
|
@ -73,65 +83,72 @@ class Network(object):
|
|||
# }
|
||||
#
|
||||
for addr in ip6_addr:
|
||||
addr["addr"] = addr["addr"].replace('%{}'.format(interface), '')
|
||||
addr["netmask"] = addr["netmask"].split('/')[0]
|
||||
addr["addr"] = addr["addr"].replace("%{}".format(interface), "")
|
||||
addr["mask"] = addr["mask"].split("/")[0]
|
||||
ip_addr.append(addr)
|
||||
|
||||
if config.network.ignore_ips and ip_addr:
|
||||
for i, ip in enumerate(ip_addr):
|
||||
if re.match(config.network.ignore_ips, ip['addr']):
|
||||
ip_addr.pop(i)
|
||||
|
||||
mac = open('/sys/class/net/{}/address'.format(interface), 'r').read().strip()
|
||||
mac = (
|
||||
open("/sys/class/net/{}/address".format(interface), "r").read().strip()
|
||||
)
|
||||
mtu = int(
|
||||
open("/sys/class/net/{}/mtu".format(interface), "r").read().strip()
|
||||
)
|
||||
vlan = None
|
||||
if len(interface.split('.')) > 1:
|
||||
vlan = int(interface.split('.')[1])
|
||||
if len(interface.split(".")) > 1:
|
||||
vlan = int(interface.split(".")[1])
|
||||
|
||||
bonding = False
|
||||
bonding_slaves = []
|
||||
if os.path.isdir('/sys/class/net/{}/bonding'.format(interface)):
|
||||
if os.path.isdir("/sys/class/net/{}/bonding".format(interface)):
|
||||
bonding = True
|
||||
bonding_slaves = open(
|
||||
'/sys/class/net/{}/bonding/slaves'.format(interface)
|
||||
).read().split()
|
||||
bonding_slaves = (
|
||||
open("/sys/class/net/{}/bonding/slaves".format(interface))
|
||||
.read()
|
||||
.split()
|
||||
)
|
||||
|
||||
# Tun and TAP support
|
||||
virtual = os.path.isfile(
|
||||
'/sys/class/net/{}/tun_flags'.format(interface)
|
||||
)
|
||||
virtual = os.path.isfile("/sys/class/net/{}/tun_flags".format(interface))
|
||||
|
||||
nic = {
|
||||
'name': interface,
|
||||
'mac': mac if mac != '00:00:00:00:00:00' else None,
|
||||
'ip': [
|
||||
'{}/{}'.format(
|
||||
x['addr'],
|
||||
IPAddress(x['netmask']).netmask_bits()
|
||||
) for x in ip_addr
|
||||
] if ip_addr else None, # FIXME: handle IPv6 addresses
|
||||
'ethtool': Ethtool(interface).parse(),
|
||||
'virtual': virtual,
|
||||
'vlan': vlan,
|
||||
'bonding': bonding,
|
||||
'bonding_slaves': bonding_slaves,
|
||||
"name": interface,
|
||||
"mac": mac if mac != "00:00:00:00:00:00" else None,
|
||||
"ip": (
|
||||
[
|
||||
"{}/{}".format(x["addr"], IPAddress(x["mask"]).netmask_bits())
|
||||
for x in ip_addr
|
||||
]
|
||||
if ip_addr
|
||||
else None
|
||||
), # FIXME: handle IPv6 addresses
|
||||
"ethtool": Ethtool(interface).parse(),
|
||||
"virtual": virtual,
|
||||
"vlan": vlan,
|
||||
"mtu": mtu,
|
||||
"bonding": bonding,
|
||||
"bonding_slaves": bonding_slaves,
|
||||
}
|
||||
nics.append(nic)
|
||||
return nics
|
||||
|
||||
def _set_bonding_interfaces(self):
|
||||
bonding_nics = (x for x in self.nics if x['bonding'])
|
||||
bonding_nics = (x for x in self.nics if x["bonding"])
|
||||
for nic in bonding_nics:
|
||||
bond_int = self.get_netbox_network_card(nic)
|
||||
logging.debug('Setting slave interface for {name}'.format(
|
||||
name=bond_int.name
|
||||
))
|
||||
logging.debug(
|
||||
"Setting slave interface for {name}".format(name=bond_int.name)
|
||||
)
|
||||
for slave_int in (
|
||||
self.get_netbox_network_card(slave_nic)
|
||||
for slave_nic in self.nics
|
||||
if slave_nic['name'] in nic['bonding_slaves']):
|
||||
self.get_netbox_network_card(slave_nic)
|
||||
for slave_nic in self.nics
|
||||
if slave_nic["name"] in nic["bonding_slaves"]
|
||||
):
|
||||
if slave_int.lag is None or slave_int.lag.id != bond_int.id:
|
||||
logging.debug('Setting interface {name} as slave of {master}'.format(
|
||||
name=slave_int.name, master=bond_int.name
|
||||
))
|
||||
logging.debug(
|
||||
"Settting interface {name} as slave of {master}".format(
|
||||
name=slave_int.name, master=bond_int.name
|
||||
)
|
||||
)
|
||||
slave_int.lag = bond_int
|
||||
slave_int.save()
|
||||
else:
|
||||
|
@ -142,50 +159,50 @@ class Network(object):
|
|||
return self.nics
|
||||
|
||||
def get_netbox_network_card(self, nic):
|
||||
if nic['mac'] is None:
|
||||
if nic["mac"] is None:
|
||||
interface = self.nb_net.interfaces.get(
|
||||
name=nic['name'],
|
||||
**self.custom_arg_id,
|
||||
name=nic["name"], **self.custom_arg_id
|
||||
)
|
||||
else:
|
||||
interface = self.nb_net.interfaces.get(
|
||||
mac_address=nic['mac'],
|
||||
name=nic['name'],
|
||||
**self.custom_arg_id,
|
||||
mac_address=nic["mac"], name=nic["name"], **self.custom_arg_id
|
||||
)
|
||||
return interface
|
||||
|
||||
def get_netbox_network_cards(self):
|
||||
return self.nb_net.interfaces.filter(
|
||||
**self.custom_arg_id,
|
||||
)
|
||||
return self.nb_net.interfaces.filter(**self.custom_arg_id)
|
||||
|
||||
def get_netbox_type_for_nic(self, nic):
|
||||
if self.get_network_type() == 'virtual':
|
||||
return self.dcim_choices['interface:type']['Virtual']
|
||||
if self.get_network_type() == "virtual":
|
||||
return self.dcim_choices["interface:type"]["Virtual"]
|
||||
|
||||
if nic.get('bonding'):
|
||||
return self.dcim_choices['interface:type']['Link Aggregation Group (LAG)']
|
||||
if nic.get("bonding"):
|
||||
return self.dcim_choices["interface:type"]["Link Aggregation Group (LAG)"]
|
||||
|
||||
if nic.get('bonding'):
|
||||
return self.dcim_choices['interface:type']['Link Aggregation Group (LAG)']
|
||||
if nic.get("bonding"):
|
||||
return self.dcim_choices["interface:type"]["Link Aggregation Group (LAG)"]
|
||||
|
||||
if nic.get('virtual'):
|
||||
return self.dcim_choices['interface:type']['Virtual']
|
||||
if nic.get("virtual"):
|
||||
return self.dcim_choices["interface:type"]["Virtual"]
|
||||
|
||||
if nic.get('ethtool') is None:
|
||||
return self.dcim_choices['interface:type']['Other']
|
||||
if nic.get("ethtool") is None:
|
||||
return self.dcim_choices["interface:type"]["Other"]
|
||||
|
||||
if nic['ethtool']['speed'] == '10000Mb/s':
|
||||
if nic['ethtool']['port'] == 'FIBRE':
|
||||
return self.dcim_choices['interface:type']['SFP+ (10GE)']
|
||||
return self.dcim_choices['interface:type']['10GBASE-T (10GE)']
|
||||
if nic["ethtool"]["speed"] == "10000Mb/s":
|
||||
if nic["ethtool"]["port"] in ("FIBRE", "Direct Attach Copper"):
|
||||
return self.dcim_choices["interface:type"]["SFP+ (10GE)"]
|
||||
return self.dcim_choices["interface:type"]["10GBASE-T (10GE)"]
|
||||
|
||||
elif nic['ethtool']['speed'] == '1000Mb/s':
|
||||
if nic['ethtool']['port'] == 'FIBRE':
|
||||
return self.dcim_choices['interface:type']['SFP (1GE)']
|
||||
return self.dcim_choices['interface:type']['1000BASE-T (1GE)']
|
||||
return self.dcim_choices['interface:type']['Other']
|
||||
elif nic["ethtool"]["speed"] == "25000Mb/s":
|
||||
if nic["ethtool"]["port"] in ("FIBRE", "Direct Attach Copper"):
|
||||
return self.dcim_choices["interface:type"]["SFP28 (25GE)"]
|
||||
|
||||
elif nic["ethtool"]["speed"] == "1000Mb/s":
|
||||
if nic["ethtool"]["port"] in ("FIBRE", "Direct Attach Copper"):
|
||||
return self.dcim_choices["interface:type"]["SFP (1GE)"]
|
||||
return self.dcim_choices["interface:type"]["1000BASE-T (1GE)"]
|
||||
|
||||
return self.dcim_choices["interface:type"]["Other"]
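# Examples of the speed/port mapping above (ethtool values are illustrative):
#   speed "25000Mb/s", port "Direct Attach Copper" -> "SFP28 (25GE)"
#   speed "10000Mb/s", port "Twisted Pair"         -> "10GBASE-T (10GE)"
#   speed "1000Mb/s",  port "FIBRE"                -> "SFP (1GE)"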
|
||||
|
||||
def get_or_create_vlan(self, vlan_id):
|
||||
# FIXME: we may need to specify the datacenter
|
||||
|
@ -195,94 +212,129 @@ class Network(object):
|
|||
)
|
||||
if vlan is None:
|
||||
vlan = nb.ipam.vlans.create(
|
||||
name='VLAN {}'.format(vlan_id),
|
||||
name="VLAN {}".format(vlan_id),
|
||||
vid=vlan_id,
|
||||
)
|
||||
return vlan
|
||||
|
||||
def reset_vlan_on_interface(self, nic, interface):
|
||||
update = False
|
||||
vlan_id = nic['vlan']
|
||||
lldp_vlan = self.lldp.get_switch_vlan(nic['name']) if config.network.lldp else None
|
||||
vlan_id = nic["vlan"]
|
||||
lldp_vlan = (
|
||||
self.lldp.get_switch_vlan(nic["name"])
|
||||
if config.network.lldp and isinstance(self, ServerNetwork)
|
||||
else None
|
||||
)
|
||||
# For some strange reason, we need to get the object from scratch
|
||||
# The object returned by pynetbox's save isn't always working (since pynetbox 6)
|
||||
interface = self.nb_net.interfaces.get(id=interface.id)
|
||||
|
||||
# if local interface isn't a interface vlan or lldp doesn't report a vlan-id
|
||||
if vlan_id is None and lldp_vlan is None and \
|
||||
(interface.mode is not None or len(interface.tagged_vlans) > 0):
|
||||
logging.info('Interface {interface} is not tagged, resetting mode'.format(
|
||||
interface=interface))
|
||||
# Handle the case where the local interface isn't an interface vlan as reported by Netbox
|
||||
# and that LLDP doesn't report a vlan-id
|
||||
if (
|
||||
vlan_id is None
|
||||
and lldp_vlan is None
|
||||
and (interface.mode is not None or len(interface.tagged_vlans) > 0)
|
||||
):
|
||||
logging.info(
|
||||
"Interface {interface} is not tagged, reseting mode".format(
|
||||
interface=interface
|
||||
)
|
||||
)
|
||||
update = True
|
||||
interface.mode = None
|
||||
interface.tagged_vlans = []
|
||||
interface.untagged_vlan = None
|
||||
# if it's a vlan interface
|
||||
# if the local interface is configured with a vlan, it's supposed to be tagged
|
||||
# if mode is either not set or not correctly configured or vlan are not
|
||||
# correctly configured, we reset the vlan
|
||||
elif vlan_id and (
|
||||
interface.mode is None or
|
||||
type(interface.mode) is not int and (
|
||||
interface.mode.value == self.dcim_choices['interface:mode']['Access'] or
|
||||
len(interface.tagged_vlans) != 1 or
|
||||
interface.tagged_vlans[0].vid != vlan_id)):
|
||||
logging.info('Resetting tagged VLAN(s) on interface {interface}'.format(
|
||||
interface=interface))
|
||||
interface.mode is None
|
||||
or not isinstance(interface.mode, int)
|
||||
and (
|
||||
hasattr(interface.mode, "value")
|
||||
and interface.mode.value
|
||||
== self.dcim_choices["interface:mode"]["Access"]
|
||||
or len(interface.tagged_vlans) != 1
|
||||
or int(interface.tagged_vlans[0].vid) != int(vlan_id)
|
||||
)
|
||||
):
|
||||
logging.info(
|
||||
"Resetting tagged VLAN(s) on interface {interface}".format(
|
||||
interface=interface
|
||||
)
|
||||
)
|
||||
update = True
|
||||
nb_vlan = self.get_or_create_vlan(vlan_id)
|
||||
interface.mode = self.dcim_choices['interface:mode']['Tagged']
|
||||
interface.mode = self.dcim_choices["interface:mode"]["Tagged"]
|
||||
interface.tagged_vlans = [nb_vlan] if nb_vlan else []
|
||||
interface.untagged_vlan = None
|
||||
# if lldp reports a vlan-id with pvid
|
||||
# Finally if LLDP reports a vlan-id with the pvid attribute
|
||||
elif lldp_vlan:
|
||||
pvid_vlan = [key for (key, value) in lldp_vlan.items() if value['pvid']]
|
||||
pvid_vlan = [key for (key, value) in lldp_vlan.items() if value["pvid"]]
|
||||
if len(pvid_vlan) > 0 and (
|
||||
interface.mode is None or
|
||||
interface.mode.value != self.dcim_choices['interface:mode']['Access'] or
|
||||
interface.untagged_vlan is None or
|
||||
interface.untagged_vlan.vid != int(pvid_vlan[0])):
|
||||
logging.info('Resetting access VLAN on interface {interface}'.format(
|
||||
interface=interface))
|
||||
interface.mode is None
|
||||
or interface.mode.value != self.dcim_choices["interface:mode"]["Access"]
|
||||
or interface.untagged_vlan is None
|
||||
or interface.untagged_vlan.vid != int(pvid_vlan[0])
|
||||
):
|
||||
logging.info(
|
||||
"Resetting access VLAN on interface {interface}".format(
|
||||
interface=interface
|
||||
)
|
||||
)
|
||||
update = True
|
||||
nb_vlan = self.get_or_create_vlan(pvid_vlan[0])
|
||||
interface.mode = self.dcim_choices['interface:mode']['Access']
|
||||
interface.mode = self.dcim_choices["interface:mode"]["Access"]
|
||||
interface.untagged_vlan = nb_vlan.id
|
||||
return update, interface
|
||||
|
||||
def create_netbox_nic(self, nic, mgmt=False):
|
||||
# TODO: add Optic Vendor, PN and Serial
|
||||
type = self.get_netbox_type_for_nic(nic)
|
||||
logging.info('Creating NIC {name} ({mac}) on {device}'.format(
|
||||
name=nic['name'], mac=nic['mac'], device=self.device.name))
|
||||
nic_type = self.get_netbox_type_for_nic(nic)
|
||||
logging.info(
|
||||
"Creating NIC {name} ({mac}) on {device}".format(
|
||||
name=nic["name"], mac=nic["mac"], device=self.device.name
|
||||
)
|
||||
)
|
||||
|
||||
nb_vlan = None
|
||||
|
||||
params = {
|
||||
'name': nic['name'],
|
||||
'type': type,
|
||||
'mgmt_only': mgmt,
|
||||
**self.custom_arg,
|
||||
}
|
||||
params = dict(self.custom_arg)
|
||||
params.update(
|
||||
{
|
||||
"name": nic["name"],
|
||||
"type": nic_type,
|
||||
"mgmt_only": mgmt,
|
||||
}
|
||||
)
|
||||
if nic["mac"]:
|
||||
params["mac_address"] = nic["mac"]
|
||||
|
||||
if not nic.get('virtual', False):
|
||||
params['mac_address'] = nic['mac']
|
||||
if nic["mtu"]:
|
||||
params["mtu"] = nic["mtu"]
|
||||
|
||||
interface = self.nb_net.interfaces.create(**params)
|
||||
|
||||
if nic['vlan']:
|
||||
nb_vlan = self.get_or_create_vlan(nic['vlan'])
|
||||
interface.mode = 200
|
||||
if nic["vlan"]:
|
||||
nb_vlan = self.get_or_create_vlan(nic["vlan"])
|
||||
interface.mode = self.dcim_choices["interface:mode"]["Tagged"]
|
||||
interface.tagged_vlans = [nb_vlan.id]
|
||||
interface.save()
|
||||
elif config.network.lldp and self.lldp.get_switch_vlan(nic['name']) is not None:
|
||||
elif config.network.lldp and self.lldp.get_switch_vlan(nic["name"]) is not None:
|
||||
# if lldp reports a vlan on an interface, tag the interface in access and set the vlan
|
||||
# report only the interface which has `pvid=yes` (ie: lldp.eth3.vlan.pvid=yes)
|
||||
# if pvid is not present, it'll be processed as a vlan tagged interface
|
||||
vlans = self.lldp.get_switch_vlan(nic['name'])
|
||||
vlans = self.lldp.get_switch_vlan(nic["name"])
|
||||
for vid, vlan_infos in vlans.items():
|
||||
nb_vlan = self.get_or_create_vlan(vid)
|
||||
if vlan_infos.get('vid'):
|
||||
interface.mode = self.dcim_choices['interface:mode']['Access']
|
||||
if vlan_infos.get("vid"):
|
||||
interface.mode = self.dcim_choices["interface:mode"]["Access"]
|
||||
interface.untagged_vlan = nb_vlan.id
|
||||
interface.save()
|
||||
|
||||
# cable the interface
|
||||
if config.network.lldp:
|
||||
if config.network.lldp and isinstance(self, ServerNetwork):
|
||||
switch_ip = self.lldp.get_switch_ip(interface.name)
|
||||
switch_interface = self.lldp.get_switch_port(interface.name)
|
||||
|
||||
|
@ -295,7 +347,7 @@ class Network(object):
|
|||
return interface
|
||||
|
||||
def create_or_update_netbox_ip_on_interface(self, ip, interface):
|
||||
'''
|
||||
"""
|
||||
Two behaviors:
|
||||
- Anycast IP
|
||||
* If IP exists and is in Anycast, create a new Anycast one
|
||||
|
@ -306,130 +358,180 @@ class Network(object):
|
|||
* If IP doesn't exist, create it
|
||||
* If IP exists and isn't assigned, take it
|
||||
* If IP exists and interface is wrong, change interface
|
||||
'''
|
||||
"""
|
||||
netbox_ips = nb.ipam.ip_addresses.filter(
|
||||
address=ip,
|
||||
)
|
||||
if not len(netbox_ips):
|
||||
logging.info('Create new IP {ip} on {interface}'.format(
|
||||
ip=ip, interface=interface))
|
||||
netbox_ip = nb.ipam.ip_addresses.create(
|
||||
address=ip,
|
||||
interface=interface.id,
|
||||
status=1,
|
||||
if not netbox_ips:
|
||||
logging.info(
|
||||
"Create new IP {ip} on {interface}".format(ip=ip, interface=interface)
|
||||
)
|
||||
else:
|
||||
netbox_ip = netbox_ips[0]
|
||||
# If IP exists in anycast
|
||||
if netbox_ip.role and netbox_ip.role.label == 'Anycast':
|
||||
logging.debug('IP {} is Anycast..'.format(ip))
|
||||
unassigned_anycast_ip = [x for x in netbox_ips if x.interface is None]
|
||||
assigned_anycast_ip = [x for x in netbox_ips if
|
||||
x.interface and x.interface.id == interface.id]
|
||||
# use the first available anycast ip
|
||||
if len(unassigned_anycast_ip):
|
||||
logging.info('Assigning existing Anycast IP {} to interface'.format(ip))
|
||||
netbox_ip = unassigned_anycast_ip[0]
|
||||
netbox_ip.interface = interface
|
||||
netbox_ip.save()
|
||||
# or if everything is assigned to other servers
|
||||
elif not len(assigned_anycast_ip):
|
||||
logging.info('Creating Anycast IP {} and assigning it to interface'.format(ip))
|
||||
netbox_ip = nb.ipam.ip_addresses.create(
|
||||
address=ip,
|
||||
interface=interface.id,
|
||||
status=1,
|
||||
role=self.ipam_choices['ip-address:role']['Anycast'],
|
||||
tenant=self.tenant.id if self.tenant else None,
|
||||
)
|
||||
return netbox_ip
|
||||
else:
|
||||
if netbox_ip.interface is None:
|
||||
logging.info('Assigning existing IP {ip} to {interface}'.format(
|
||||
ip=ip, interface=interface))
|
||||
elif netbox_ip.interface.id != interface.id:
|
||||
logging.info(
|
||||
'Detected interface change for ip {ip}: old interface is '
|
||||
'{old_interface} (id: {old_id}), new interface is {new_interface} '
|
||||
' (id: {new_id})'
|
||||
.format(
|
||||
old_interface=netbox_ip.interface, new_interface=interface,
|
||||
old_id=netbox_ip.id, new_id=interface.id, ip=netbox_ip.address
|
||||
))
|
||||
else:
|
||||
return netbox_ip
|
||||
query_params = {
|
||||
"address": ip,
|
||||
"status": "active",
|
||||
"assigned_object_type": self.assigned_object_type,
|
||||
"assigned_object_id": interface.id,
|
||||
}
|
||||
|
||||
netbox_ip = nb.ipam.ip_addresses.create(**query_params)
|
||||
return netbox_ip
|
||||
|
||||
netbox_ip = list(netbox_ips)[0]
|
||||
# If IP exists in anycast
|
||||
if netbox_ip.role and netbox_ip.role.label == "Anycast":
|
||||
logging.debug("IP {} is Anycast..".format(ip))
|
||||
unassigned_anycast_ip = [x for x in netbox_ips if x.interface is None]
|
||||
assigned_anycast_ip = [
|
||||
x for x in netbox_ips if x.interface and x.interface.id == interface.id
|
||||
]
|
||||
# use the first available anycast ip
|
||||
if len(unassigned_anycast_ip):
|
||||
logging.info("Assigning existing Anycast IP {} to interface".format(ip))
|
||||
netbox_ip = unassigned_anycast_ip[0]
|
||||
netbox_ip.interface = interface
|
||||
netbox_ip.save()
|
||||
return netbox_ip
|
||||
# or if everything is assigned to other servers
|
||||
elif not len(assigned_anycast_ip):
|
||||
logging.info(
|
||||
"Creating Anycast IP {} and assigning it to interface".format(ip)
|
||||
)
|
||||
query_params = {
|
||||
"address": ip,
|
||||
"status": "active",
|
||||
"role": self.ipam_choices["ip-address:role"]["Anycast"],
|
||||
"tenant": self.tenant.id if self.tenant else None,
|
||||
"assigned_object_type": self.assigned_object_type,
|
||||
"assigned_object_id": interface.id,
|
||||
}
|
||||
netbox_ip = nb.ipam.ip_addresses.create(**query_params)
|
||||
return netbox_ip
|
||||
else:
|
||||
ip_interface = getattr(netbox_ip, "interface", None)
|
||||
assigned_object = getattr(netbox_ip, "assigned_object", None)
|
||||
if not ip_interface or not assigned_object:
|
||||
logging.info(
|
||||
"Assigning existing IP {ip} to {interface}".format(
|
||||
ip=ip, interface=interface
|
||||
)
|
||||
)
|
||||
elif (ip_interface and ip_interface.id != interface.id) or (
|
||||
assigned_object and assigned_object.id != interface.id
|
||||
):
|
||||
|
||||
old_interface = getattr(netbox_ip, "assigned_object", "n/a")
|
||||
logging.info(
|
||||
"Detected interface change for ip {ip}: old interface is "
|
||||
"{old_interface} (id: {old_id}), new interface is {new_interface} "
|
||||
" (id: {new_id})".format(
|
||||
old_interface=old_interface,
|
||||
new_interface=interface,
|
||||
old_id=netbox_ip.id,
|
||||
new_id=interface.id,
|
||||
ip=netbox_ip.address,
|
||||
)
|
||||
)
|
||||
else:
|
||||
return netbox_ip
|
||||
|
||||
netbox_ip.assigned_object_type = self.assigned_object_type
|
||||
netbox_ip.assigned_object_id = interface.id
|
||||
netbox_ip.save()
|
||||
|
||||
def create_or_update_netbox_network_cards(self):
|
||||
if config.update_all is None or config.update_network is None:
|
||||
return None
|
||||
logging.debug('Creating/Updating NIC...')
|
||||
logging.debug("Creating/Updating NIC...")
|
||||
|
||||
# delete unknown interface
|
||||
nb_nics = self.get_netbox_network_cards()
|
||||
local_nics = [x['name'] for x in self.nics]
|
||||
for nic in nb_nics[:]:
|
||||
nb_nics = list(self.get_netbox_network_cards())
|
||||
local_nics = [x["name"] for x in self.nics]
|
||||
for nic in nb_nics:
|
||||
if nic.name not in local_nics:
|
||||
logging.info('Deleting netbox interface {name} because not present locally'.format(
|
||||
name=nic.name
|
||||
))
|
||||
logging.info(
|
||||
"Deleting netbox interface {name} because not present locally".format(
|
||||
name=nic.name
|
||||
)
|
||||
)
|
||||
nb_nics.remove(nic)
|
||||
nic.delete()
|
||||
|
||||
# delete IP on netbox that are not known on this server
|
||||
if len(nb_nics):
|
||||
netbox_ips = nb.ipam.ip_addresses.filter(
|
||||
interface_id=[x.id for x in nb_nics],
|
||||
|
||||
def batched(it, n):
|
||||
while batch := tuple(islice(it, n)):
|
||||
yield batch
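# The ip_addresses lookup below is issued in chunks of 25 interface ids,
# presumably to keep the query string sent to the NetBox API reasonably short.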
|
||||
|
||||
netbox_ips = []
|
||||
for ids in batched((x.id for x in nb_nics), 25):
|
||||
netbox_ips += list(nb.ipam.ip_addresses.filter(**{self.intf_type: ids}))
|
||||
|
||||
all_local_ips = list(
|
||||
chain.from_iterable([x["ip"] for x in self.nics if x["ip"] is not None])
|
||||
)
|
||||
all_local_ips = list(chain.from_iterable([
|
||||
x['ip'] for x in self.nics if x['ip'] is not None
|
||||
]))
|
||||
for netbox_ip in netbox_ips:
|
||||
if netbox_ip.address not in all_local_ips:
|
||||
logging.info('Unassigning IP {ip} from {interface}'.format(
|
||||
ip=netbox_ip.address, interface=netbox_ip.interface))
|
||||
netbox_ip.interface = None
|
||||
logging.info(
|
||||
"Unassigning IP {ip} from {interface}".format(
|
||||
ip=netbox_ip.address, interface=netbox_ip.assigned_object
|
||||
)
|
||||
)
|
||||
netbox_ip.assigned_object_type = None
|
||||
netbox_ip.assigned_object_id = None
|
||||
netbox_ip.save()
|
||||
|
||||
# update each nic
|
||||
for nic in self.nics:
|
||||
interface = self.get_netbox_network_card(nic)
|
||||
if not interface:
|
||||
logging.info('Interface {mac_address} not found, creating..'.format(
|
||||
mac_address=nic['mac'])
|
||||
logging.info(
|
||||
"Interface {mac_address} not found, creating..".format(
|
||||
mac_address=nic["mac"]
|
||||
)
|
||||
)
|
||||
interface = self.create_netbox_nic(nic)
|
||||
|
||||
nic_update = 0
|
||||
if nic['name'] != interface.name:
|
||||
logging.info('Updating interface {interface} name to: {name}'.format(
|
||||
interface=interface, name=nic['name']))
|
||||
interface.name = nic['name']
|
||||
if nic["name"] != interface.name:
|
||||
logging.info(
|
||||
"Updating interface {interface} name to: {name}".format(
|
||||
interface=interface, name=nic["name"]
|
||||
)
|
||||
)
|
||||
interface.name = nic["name"]
|
||||
nic_update += 1
|
||||
|
||||
ret, interface = self.reset_vlan_on_interface(nic, interface)
|
||||
nic_update += ret
|
||||
|
||||
_type = self.get_netbox_type_for_nic(nic)
|
||||
if not interface.type or \
|
||||
_type != interface.type.value:
|
||||
logging.info('Interface type is wrong, resetting')
|
||||
interface.type = _type
|
||||
nic_update += 1
|
||||
if hasattr(interface, "mtu"):
|
||||
if nic["mtu"] != interface.mtu:
|
||||
logging.info(
|
||||
"Interface mtu is wrong, updating to: {mtu}".format(
|
||||
mtu=nic["mtu"]
|
||||
)
|
||||
)
|
||||
interface.mtu = nic["mtu"]
|
||||
nic_update += 1
|
||||
|
||||
if hasattr(interface, 'lag') and interface.lag is not None:
|
||||
if hasattr(interface, "type"):
|
||||
_type = self.get_netbox_type_for_nic(nic)
|
||||
if not interface.type or _type != interface.type.value:
|
||||
logging.info("Interface type is wrong, resetting")
|
||||
interface.type = _type
|
||||
nic_update += 1
|
||||
|
||||
if hasattr(interface, "lag") and interface.lag is not None:
|
||||
local_lag_int = next(
|
||||
item for item in self.nics if item['name'] == interface.lag.name
|
||||
item for item in self.nics if item["name"] == interface.lag.name
|
||||
)
|
||||
if nic['name'] not in local_lag_int['bonding_slaves']:
|
||||
logging.info('Interface has no LAG, resetting')
|
||||
if nic["name"] not in local_lag_int["bonding_slaves"]:
|
||||
logging.info("Interface has no LAG, resetting")
|
||||
nic_update += 1
|
||||
interface.lag = None
|
||||
|
||||
# cable the interface
|
||||
if config.network.lldp:
|
||||
if config.network.lldp and isinstance(self, ServerNetwork):
|
||||
switch_ip = self.lldp.get_switch_ip(interface.name)
|
||||
switch_interface = self.lldp.get_switch_port(interface.name)
|
||||
if switch_ip and switch_interface:
|
||||
|
@ -438,56 +540,66 @@ class Network(object):
|
|||
)
|
||||
nic_update += ret
|
||||
|
||||
if nic['ip']:
|
||||
if nic["ip"]:
|
||||
# sync local IPs
|
||||
for ip in nic['ip']:
|
||||
for ip in nic["ip"]:
|
||||
self.create_or_update_netbox_ip_on_interface(ip, interface)
|
||||
if nic_update > 0:
|
||||
interface.save()
|
||||
|
||||
self._set_bonding_interfaces()
|
||||
logging.debug('Finished updating NIC!')
|
||||
logging.debug("Finished updating NIC!")
|
||||
|
||||
|
||||
class ServerNetwork(Network):
|
||||
def __init__(self, server, *args, **kwargs):
|
||||
super(ServerNetwork, self).__init__(server, args, kwargs)
|
||||
self.ipmi = self.get_ipmi()
|
||||
|
||||
if config.network.ipmi:
|
||||
self.ipmi = self.get_ipmi()
|
||||
if self.ipmi:
|
||||
self.nics.append(self.ipmi)
|
||||
|
||||
self.server = server
|
||||
self.device = self.server.get_netbox_server()
|
||||
self.nb_net = nb.dcim
|
||||
self.custom_arg = {'device': getattr(self.device, "id", None)}
|
||||
self.custom_arg_id = {'device_id': getattr(self.device, "id", None)}
|
||||
self.custom_arg = {"device": getattr(self.device, "id", None)}
|
||||
self.custom_arg_id = {"device_id": getattr(self.device, "id", None)}
|
||||
self.intf_type = "interface_id"
|
||||
self.assigned_object_type = "dcim.interface"
|
||||
|
||||
def get_network_type(self):
|
||||
return 'server'
|
||||
return "server"
|
||||
|
||||
def get_ipmi(self):
|
||||
ipmi = IPMI().parse()
|
||||
return ipmi
|
||||
|
||||
def connect_interface_to_switch(self, switch_ip, switch_interface, nb_server_interface):
|
||||
logging.info('Interface {} is not connected to switch, trying to connect..'.format(
|
||||
nb_server_interface.name
|
||||
))
|
||||
def connect_interface_to_switch(
|
||||
self, switch_ip, switch_interface, nb_server_interface
|
||||
):
|
||||
logging.info(
|
||||
"Interface {} is not connected to switch, trying to connect..".format(
|
||||
nb_server_interface.name
|
||||
)
|
||||
)
|
||||
nb_mgmt_ip = nb.ipam.ip_addresses.get(
|
||||
address=switch_ip,
|
||||
)
|
||||
if not nb_mgmt_ip:
|
||||
logging.error('Switch IP {} cannot be found in Netbox'.format(switch_ip))
|
||||
logging.error("Switch IP {} cannot be found in Netbox".format(switch_ip))
|
||||
return nb_server_interface
|
||||
|
||||
try:
|
||||
nb_switch = nb_mgmt_ip.interface.device
|
||||
logging.info('Found a switch in Netbox based on LLDP infos: {} (id: {})'.format(
|
||||
switch_ip,
|
||||
nb_switch.id
|
||||
))
|
||||
nb_switch = nb_mgmt_ip.assigned_object.device
|
||||
logging.info(
|
||||
"Found a switch in Netbox based on LLDP infos: {} (id: {})".format(
|
||||
switch_ip, nb_switch.id
|
||||
)
|
||||
)
|
||||
except KeyError:
|
||||
logging.error(
|
||||
'Switch IP {} is found but not associated to a Netbox Switch Device'.format(
|
||||
"Switch IP {} is found but not associated to a Netbox Switch Device".format(
|
||||
switch_ip
|
||||
)
|
||||
)
|
||||
|
@ -495,26 +607,32 @@ class ServerNetwork(Network):
|
|||
|
||||
switch_interface = self.lldp.get_switch_port(nb_server_interface.name)
|
||||
nb_switch_interface = nb.dcim.interfaces.get(
|
||||
device=nb_switch,
|
||||
device_id=nb_switch.id,
|
||||
name=switch_interface,
|
||||
)
|
||||
if nb_switch_interface is None:
|
||||
logging.error('Switch interface {} cannot be found'.format(switch_interface))
|
||||
logging.error(
|
||||
"Switch interface {} cannot be found".format(switch_interface)
|
||||
)
|
||||
return nb_server_interface
|
||||
|
||||
logging.info('Found interface {} on switch {}'.format(
|
||||
switch_interface,
|
||||
switch_ip,
|
||||
))
|
||||
logging.info(
|
||||
"Found interface {} on switch {}".format(
|
||||
switch_interface,
|
||||
switch_ip,
|
||||
)
|
||||
)
|
||||
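# NetBox 3.3+ models cable endpoints as generic a_terminations/b_terminations
# lists (replacing termination_a_*/termination_b_*), which is what is used here.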
cable = nb.dcim.cables.create(
|
||||
termination_a_id=nb_server_interface.id,
|
||||
termination_a_type="dcim.interface",
|
||||
termination_b_id=nb_switch_interface.id,
|
||||
termination_b_type="dcim.interface",
|
||||
a_terminations=[
|
||||
{"object_type": "dcim.interface", "object_id": nb_server_interface.id},
|
||||
],
|
||||
b_terminations=[
|
||||
{"object_type": "dcim.interface", "object_id": nb_switch_interface.id},
|
||||
],
|
||||
)
|
||||
nb_server_interface.cable = cable
|
||||
logging.info(
|
||||
'Connected interface {interface} with {switch_interface} of {switch_ip}'.format(
|
||||
"Connected interface {interface} with {switch_interface} of {switch_ip}".format(
|
||||
interface=nb_server_interface.name,
|
||||
switch_interface=switch_interface,
|
||||
switch_ip=switch_ip,
|
||||
|
@ -530,40 +648,32 @@ class ServerNetwork(Network):
|
|||
switch_ip, switch_interface, nb_server_interface
|
||||
)
|
||||
else:
|
||||
nb_sw_int = nb_server_interface.cable.termination_b
|
||||
nb_sw_int = nb_server_interface.cable.b_terminations[0]
|
||||
nb_sw = nb_sw_int.device
|
||||
nb_mgmt_int = nb.dcim.interfaces.get(
|
||||
device_id=nb_sw.id,
|
||||
mgmt_only=True
|
||||
)
|
||||
nb_mgmt_ip = nb.ipam.ip_addresses.get(
|
||||
interface_id=nb_mgmt_int.id
|
||||
)
|
||||
nb_mgmt_int = nb.dcim.interfaces.get(device_id=nb_sw.id, mgmt_only=True)
|
||||
nb_mgmt_ip = nb.ipam.ip_addresses.get(interface_id=nb_mgmt_int.id)
|
||||
if nb_mgmt_ip is None:
|
||||
logging.error(
|
||||
'Switch {switch_ip} does not have IP on its management interface'.format(
|
||||
"Switch {switch_ip} does not have IP on its management interface".format(
|
||||
switch_ip=switch_ip,
|
||||
)
|
||||
)
|
||||
return update, nb_server_interface
|
||||
|
||||
# Netbox IP is always IP/Netmask
|
||||
nb_mgmt_ip = nb_mgmt_ip.address.split('/')[0]
|
||||
if nb_mgmt_ip != switch_ip or \
|
||||
nb_sw_int.name != switch_interface:
|
||||
logging.info('Netbox cable is not connected to correct ports, fixing..')
|
||||
nb_mgmt_ip = nb_mgmt_ip.address.split("/")[0]
|
||||
if nb_mgmt_ip != switch_ip or nb_sw_int.name != switch_interface:
|
||||
logging.info("Netbox cable is not connected to correct ports, fixing..")
|
||||
logging.info(
|
||||
'Deleting cable {cable_id} from {interface} to {switch_interface} of '
|
||||
'{switch_ip}'.format(
|
||||
"Deleting cable {cable_id} from {interface} to {switch_interface} of "
|
||||
"{switch_ip}".format(
|
||||
cable_id=nb_server_interface.cable.id,
|
||||
interface=nb_server_interface.name,
|
||||
switch_interface=nb_sw_int.name,
|
||||
switch_ip=nb_mgmt_ip,
|
||||
)
|
||||
)
|
||||
cable = nb.dcim.cables.get(
|
||||
nb_server_interface.cable.id
|
||||
)
|
||||
cable = nb.dcim.cables.get(nb_server_interface.cable.id)
|
||||
cable.delete()
|
||||
update = True
|
||||
nb_server_interface = self.connect_interface_to_switch(
|
||||
|
@ -578,15 +688,17 @@ class VirtualNetwork(Network):
|
|||
self.server = server
|
||||
self.device = self.server.get_netbox_vm()
|
||||
self.nb_net = nb.virtualization
|
||||
self.custom_arg = {'virtual_machine': getattr(self.device, "id", None)}
|
||||
self.custom_arg_id = {'virtual_machine_id': getattr(self.device, "id", None)}
|
||||
self.custom_arg = {"virtual_machine": getattr(self.device, "id", None)}
|
||||
self.custom_arg_id = {"virtual_machine_id": getattr(self.device, "id", None)}
|
||||
self.intf_type = "vminterface_id"
|
||||
self.assigned_object_type = "virtualization.vminterface"
|
||||
|
||||
dcim_c = nb.virtualization.interfaces.choices()
|
||||
for _choice_type in dcim_c:
|
||||
key = 'interface:{}'.format(_choice_type)
|
||||
key = "interface:{}".format(_choice_type)
|
||||
self.dcim_choices[key] = {}
|
||||
for choice in dcim_c[_choice_type]:
|
||||
self.dcim_choices[key][choice['display_name']] = choice['value']
|
||||
self.dcim_choices[key][choice["display_name"]] = choice["value"]
|
||||
|
||||
def get_network_type(self):
|
||||
return 'virtual'
|
||||
return "virtual"
|
||||
|
|
|
@ -6,61 +6,63 @@ from netbox_agent.config import netbox_instance as nb
|
|||
PSU_DMI_TYPE = 39
|
||||
|
||||
|
||||
class PowerSupply():
|
||||
class PowerSupply:
|
||||
def __init__(self, server=None):
|
||||
self.server = server
|
||||
self.netbox_server = self.server.get_netbox_server()
|
||||
if self.server.is_blade():
|
||||
self.device_id = self.netbox_server.parent_device.id if self.netbox_server else None
|
||||
self.device_id = (
|
||||
self.netbox_server.parent_device.id if self.netbox_server else None
|
||||
)
|
||||
else:
|
||||
self.device_id = self.netbox_server.id if self.netbox_server else None
|
||||
|
||||
def get_power_supply(self):
|
||||
power_supply = []
|
||||
for psu in dmidecode.get_by_type(self.server.dmi, PSU_DMI_TYPE):
|
||||
if 'Present' not in psu['Status'] or psu['Status'] == 'Not Present':
|
||||
if "Present" not in psu["Status"] or psu["Status"] == "Not Present":
|
||||
continue
|
||||
|
||||
try:
|
||||
max_power = int(psu.get('Max Power Capacity').split()[0])
|
||||
max_power = int(psu.get("Max Power Capacity").split()[0])
|
||||
except ValueError:
|
||||
max_power = None
|
||||
desc = '{} - {}'.format(
|
||||
psu.get('Manufacturer', 'No Manufacturer').strip(),
|
||||
psu.get('Name', 'No name').strip(),
|
||||
desc = "{} - {}".format(
|
||||
psu.get("Manufacturer", "No Manufacturer").strip(),
|
||||
psu.get("Name", "No name").strip(),
|
||||
)
|
||||
|
||||
sn = psu.get('Serial Number', '').strip()
|
||||
sn = psu.get("Serial Number", "").strip()
|
||||
# Let's assume that if no serial and no power reported we skip it
|
||||
if sn == '' and max_power is None:
|
||||
if sn == "" and max_power is None:
|
||||
continue
|
||||
if sn == '':
|
||||
sn = 'N/A'
|
||||
power_supply.append({
|
||||
'name': sn,
|
||||
'description': desc,
|
||||
'allocated_draw': None,
|
||||
'maximum_draw': max_power,
|
||||
'device': self.device_id,
|
||||
})
|
||||
if sn == "":
|
||||
sn = "N/A"
|
||||
power_supply.append(
|
||||
{
|
||||
"name": sn,
|
||||
"description": desc,
|
||||
"allocated_draw": None,
|
||||
"maximum_draw": max_power,
|
||||
"device": self.device_id,
|
||||
}
|
||||
)
|
||||
return power_supply
|
||||
|
||||
def get_netbox_power_supply(self):
|
||||
return nb.dcim.power_ports.filter(
|
||||
device_id=self.device_id
|
||||
)
|
||||
return nb.dcim.power_ports.filter(device_id=self.device_id)
|
||||
|
||||
def create_or_update_power_supply(self):
|
||||
nb_psus = self.get_netbox_power_supply()
|
||||
nb_psus = list(self.get_netbox_power_supply())
|
||||
psus = self.get_power_supply()
|
||||
|
||||
# Delete unknown PSU
|
||||
delete = False
|
||||
for nb_psu in nb_psus:
|
||||
if nb_psu.name not in [x['name'] for x in psus]:
|
||||
logging.info('Deleting unknown locally PSU {name}'.format(
|
||||
name=nb_psu.name
|
||||
))
|
||||
if nb_psu.name not in [x["name"] for x in psus]:
|
||||
logging.info(
|
||||
"Deleting unknown locally PSU {name}".format(name=nb_psu.name)
|
||||
)
|
||||
nb_psu.delete()
|
||||
delete = True
|
||||
|
||||
|
@ -69,27 +71,23 @@ class PowerSupply():
|
|||
|
||||
# sync existing Netbox PSU with local infos
|
||||
for nb_psu in nb_psus:
|
||||
local_psu = next(
|
||||
item for item in psus if item['name'] == nb_psu.name
|
||||
)
|
||||
local_psu = next(item for item in psus if item["name"] == nb_psu.name)
|
||||
update = False
|
||||
if nb_psu.description != local_psu['description']:
|
||||
if nb_psu.description != local_psu["description"]:
|
||||
update = True
|
||||
nb_psu.description = local_psu['description']
|
||||
if nb_psu.maximum_draw != local_psu['maximum_draw']:
|
||||
nb_psu.description = local_psu["description"]
|
||||
if nb_psu.maximum_draw != local_psu["maximum_draw"]:
|
||||
update = True
|
||||
nb_psu.maximum_draw = local_psu['maximum_draw']
|
||||
nb_psu.maximum_draw = local_psu["maximum_draw"]
|
||||
if update:
|
||||
nb_psu.save()
|
||||
|
||||
for psu in psus:
|
||||
if psu['name'] not in [x.name for x in nb_psus]:
|
||||
logging.info('Creating PSU {name} ({description}), {maximum_draw}W'.format(
|
||||
**psu
|
||||
))
|
||||
nb_psu = nb.dcim.power_ports.create(
|
||||
**psu
|
||||
if psu["name"] not in [x.name for x in nb_psus]:
|
||||
logging.info(
|
||||
"Creating PSU {name} ({description}), {maximum_draw}W".format(**psu)
|
||||
)
|
||||
nb_psu = nb.dcim.power_ports.create(**psu)
|
||||
|
||||
return True
|
||||
|
||||
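For reference, each entry returned by get_power_supply() is passed verbatim to nb.dcim.power_ports.create(**psu) above. A sketch of its shape, with hypothetical values:

    # Hypothetical example of one PSU dict built by get_power_supply().
    psu = {
        "name": "5WBXXX123",         # PSU serial number, or "N/A" when missing
        "description": "HP - Power Supply 1",
        "allocated_draw": None,      # filled later by report_power_consumption()
        "maximum_draw": 800,         # watts, parsed from "Max Power Capacity"
        "device": 42,                # NetBox device ID (parent chassis for blades)
    }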
|
@ -97,7 +95,7 @@ class PowerSupply():
|
|||
try:
|
||||
psu_cons = self.server.get_power_consumption()
|
||||
except NotImplementedError:
|
||||
logging.error('Cannot report power consumption for this vendor')
|
||||
logging.error("Cannot report power consumption for this vendor")
|
||||
return False
|
||||
nb_psus = self.get_netbox_power_supply()
|
||||
|
||||
|
@ -105,22 +103,27 @@ class PowerSupply():
|
|||
return False
|
||||
|
||||
# find power feeds for rack or dc
|
||||
voltage = None
|
||||
pwr_feeds = None
|
||||
if self.netbox_server.rack:
|
||||
pwr_feeds = nb.dcim.power_feeds.filter(
|
||||
rack=self.netbox_server.rack.id
|
||||
)
|
||||
if pwr_feeds is None or not len(pwr_feeds):
|
||||
logging.info('Could not find power feeds for Rack, defaulting value to 230')
|
||||
voltage = 230
|
||||
pwr_feeds = nb.dcim.power_feeds.filter(rack=self.netbox_server.rack.id)
|
||||
|
||||
if pwr_feeds:
|
||||
voltage = [p["voltage"] for p in pwr_feeds]
|
||||
else:
|
||||
logging.info("Could not find power feeds for Rack, defaulting value to 230")
|
||||
voltage = [230 for _ in nb_psus]
|
||||
|
||||
for i, nb_psu in enumerate(nb_psus):
|
||||
nb_psu.allocated_draw = float(psu_cons[i]) * voltage
|
||||
nb_psu.allocated_draw = int(float(psu_cons[i]) * voltage[i])
|
||||
if nb_psu.allocated_draw < 1:
|
||||
logging.info("PSU is not connected or in standby mode")
|
||||
continue
|
||||
nb_psu.save()
|
||||
logging.info('Updated power consumption for PSU {}: {}W'.format(
|
||||
nb_psu.name,
|
||||
nb_psu.allocated_draw,
|
||||
))
|
||||
logging.info(
|
||||
"Updated power consumption for PSU {}: {}W".format(
|
||||
nb_psu.name,
|
||||
nb_psu.allocated_draw,
|
||||
)
|
||||
)
|
||||
|
||||
return True
|
||||
|
|
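report_power_consumption() above multiplies each PSU's reported amperage by the voltage of the rack's power feed (falling back to 230 V when no feed is found) and truncates to an integer. A worked example with illustrative values:

    # Amps come from the vendor tool, volts from the rack's power feeds
    # or the 230 V fallback; the numbers here are illustrative only.
    psu_cons = ["1.8", "1.4"]
    voltage = [230, 230]
    draws = [int(float(amps) * volts) for amps, volts in zip(psu_cons, voltage)]
    print(draws)  # [414, 322]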
|
@ -1,4 +1,4 @@
|
|||
class RaidController():
|
||||
class RaidController:
|
||||
|
||||
def get_product_name(self):
|
||||
raise NotImplementedError
|
||||
|
@ -15,7 +15,10 @@ class RaidController():
|
|||
def get_physical_disks(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def is_external(self):
|
||||
return False
|
||||
|
||||
class Raid():
|
||||
|
||||
class Raid:
|
||||
def get_controllers(self):
|
||||
raise NotImplementedError
|
||||
|
|
|
@ -1,173 +1,240 @@
|
|||
import logging
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
from netbox_agent.misc import get_vendor
|
||||
from netbox_agent.raid.base import Raid, RaidController
|
||||
|
||||
REGEXP_CONTROLLER_HP = re.compile(r'Smart Array ([a-zA-Z0-9- ]+) in Slot ([0-9]+)')
|
||||
REGEXP_CONTROLLER_HP = re.compile(r"Smart Array ([a-zA-Z0-9- ]+) in Slot ([0-9]+)")
|
||||
|
||||
|
||||
def _get_indentation(string):
|
||||
"""Return the number of spaces before the current line."""
|
||||
return len(string) - len(string.lstrip(' '))
|
||||
class HPRaidControllerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def _get_key_value(string):
|
||||
"""Return the (key, value) as a tuple from a string."""
|
||||
# Normally all properties look like this:
|
||||
# Unique Identifier: 600508B1001CE4ACF473EE9C826230FF
|
||||
# Disk Name: /dev/sda
|
||||
# Mount Points: None
|
||||
key = ''
|
||||
value = ''
|
||||
try:
|
||||
key, value = string.split(':')
|
||||
except ValueError:
|
||||
# This handles the case when the property of a logical drive
|
||||
# returned is as follows. Here we cannot split by ':' because
|
||||
# the disk id has colon in it. So if this is about disk,
|
||||
# then strip it accordingly.
|
||||
# Mirror Group 0: physicaldrive 6I:1:5
|
||||
string = string.lstrip(' ')
|
||||
if string.startswith('physicaldrive'):
|
||||
fields = string.split(' ')
|
||||
key = fields[0]
|
||||
value = fields[1]
|
||||
else:
|
||||
# TODO(rameshg87): Check if this ever occurs.
|
||||
return None, None
|
||||
def ssacli(sub_command):
|
||||
command = ["ssacli"]
|
||||
command.extend(sub_command.split())
|
||||
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
stdout, stderr = p.communicate()
|
||||
stdout = stdout.decode("utf-8")
|
||||
if p.returncode != 0:
|
||||
mesg = "Failed to execute command '{}':\n{}".format(" ".join(command), stdout)
|
||||
raise HPRaidControllerError(mesg)
|
||||
|
||||
return key.lstrip(' ').rstrip(' '), value.lstrip(' ').rstrip(' ')
|
||||
if "does not have any physical" in stdout:
|
||||
return list()
|
||||
else:
|
||||
lines = stdout.split("\n")
|
||||
lines = list(filter(None, lines))
|
||||
return lines
|
||||
|
||||
|
||||
def _get_dict(lines, start_index, indentation):
|
||||
"""Recursive function for parsing hpssacli/ssacli output."""
|
||||
def _test_if_valid_line(line):
|
||||
ignore_patterns = [
|
||||
"Note:",
|
||||
"Error:",
|
||||
"is not loaded",
|
||||
"README",
|
||||
" failure",
|
||||
" cache",
|
||||
]
|
||||
for pattern in ignore_patterns:
|
||||
if not line or pattern in line:
|
||||
return None
|
||||
return line
|
||||
|
||||
info = {}
|
||||
current_item = None
|
||||
|
||||
i = start_index
|
||||
while i < len(lines):
|
||||
current_line = lines[i]
|
||||
if current_line.startswith('Note:'):
|
||||
i = i + 1
|
||||
def _parse_ctrl_output(lines):
|
||||
controllers = {}
|
||||
current_ctrl = None
|
||||
|
||||
for line in lines:
|
||||
line = line.strip()
|
||||
line = _test_if_valid_line(line)
|
||||
if line is None:
|
||||
continue
|
||||
ctrl = REGEXP_CONTROLLER_HP.search(line)
|
||||
if ctrl is not None:
|
||||
slot = ctrl.group(2)
|
||||
current_ctrl = "{} - Slot {}".format(ctrl.group(1), slot)
|
||||
controllers[current_ctrl] = {"Slot": slot}
|
||||
if "Embedded" not in line:
|
||||
controllers[current_ctrl]["External"] = True
|
||||
continue
|
||||
if ": " not in line:
|
||||
continue
|
||||
|
||||
current_line_indentation = _get_indentation(current_line)
|
||||
# This check ignore some useless information that make
|
||||
# crash the parsing
|
||||
product_name = REGEXP_CONTROLLER_HP.search(current_line)
|
||||
if current_line_indentation == 0 and not product_name:
|
||||
i = i + 1
|
||||
attr, val = line.split(": ", 1)
|
||||
attr = attr.strip()
|
||||
val = val.strip()
|
||||
controllers[current_ctrl][attr] = val
|
||||
return controllers
|
||||
|
||||
|
||||
def _parse_pd_output(lines):
|
||||
drives = {}
|
||||
current_array = None
|
||||
current_drv = None
|
||||
|
||||
for line in lines:
|
||||
line = line.strip()
|
||||
line = _test_if_valid_line(line)
|
||||
if line is None:
|
||||
continue
|
||||
|
||||
if current_line_indentation == indentation:
|
||||
current_item = current_line.lstrip(' ')
|
||||
|
||||
info[current_item] = {}
|
||||
i = i + 1
|
||||
# Parses the Array the drives are in
|
||||
if line.startswith("Array"):
|
||||
current_array = line.split(None, 1)[1]
|
||||
# Detects new physical drive
|
||||
if line.startswith("physicaldrive"):
|
||||
current_drv = line.split(None, 1)[1]
|
||||
drives[current_drv] = {}
|
||||
if current_array is not None:
|
||||
drives[current_drv]["Array"] = current_array
|
||||
continue
|
||||
if ": " not in line:
|
||||
continue
|
||||
attr, val = line.split(": ", 1)
|
||||
attr = attr.strip()
|
||||
val = val.strip()
|
||||
drives.setdefault(current_drv, {})[attr] = val
|
||||
return drives
|
||||
|
||||
if i >= len(lines) - 1:
|
||||
key, value = _get_key_value(current_line)
|
||||
# If this is some unparsable information, then
|
||||
# just skip it.
|
||||
if key:
|
||||
info[current_item][key] = value
|
||||
return info, i
|
||||
|
||||
next_line = lines[i + 1]
|
||||
next_line_indentation = _get_indentation(next_line)
|
||||
def _parse_ld_output(lines):
|
||||
drives = {}
|
||||
current_array = None
|
||||
current_drv = None
|
||||
|
||||
if current_line_indentation == next_line_indentation:
|
||||
key, value = _get_key_value(current_line)
|
||||
if key:
|
||||
info[current_item][key] = value
|
||||
i = i + 1
|
||||
elif next_line_indentation > current_line_indentation:
|
||||
ret_dict, j = _get_dict(lines, i, current_line_indentation)
|
||||
info[current_item].update(ret_dict)
|
||||
i = j + 1
|
||||
elif next_line_indentation < current_line_indentation:
|
||||
key, value = _get_key_value(current_line)
|
||||
if key:
|
||||
info[current_item][key] = value
|
||||
return info, i
|
||||
|
||||
return info, i
|
||||
for line in lines:
|
||||
line = line.strip()
|
||||
line = _test_if_valid_line(line)
|
||||
if line is None:
|
||||
continue
|
||||
# Parses the Array the drives are in
|
||||
if line.startswith("Array"):
|
||||
current_array = line.split(None, 1)[1]
|
||||
drives[current_array] = {}
|
||||
# Detects new physical drive
|
||||
if line.startswith("Logical Drive"):
|
||||
current_drv = line.split(": ", 1)[1]
|
||||
drives.setdefault(current_array, {})["LogicalDrive"] = current_drv
|
||||
continue
|
||||
if ": " not in line:
|
||||
continue
|
||||
attr, val = line.split(": ", 1)
|
||||
drives.setdefault(current_array, {})[attr] = val
|
||||
return drives
|
||||
|
||||
|
||||
class HPRaidController(RaidController):
|
||||
def __init__(self, controller_name, data):
|
||||
self.controller_name = controller_name
|
||||
self.data = data
|
||||
self.pdrives = self._get_physical_disks()
|
||||
arrays = [d["Array"] for d in self.pdrives.values() if d.get("Array")]
|
||||
if arrays:
|
||||
self.ldrives = self._get_logical_drives()
|
||||
self._get_virtual_drives_map()
|
||||
|
||||
def get_product_name(self):
|
||||
return self.controller_name
|
||||
|
||||
def get_manufacturer(self):
|
||||
return 'HP'
|
||||
return "HP"
|
||||
|
||||
def get_serial_number(self):
|
||||
return self.data['Serial Number']
|
||||
return self.data["Serial Number"]
|
||||
|
||||
def get_firmware_version(self):
|
||||
return self.data['Firmware Version']
|
||||
return self.data["Firmware Version"]
|
||||
|
||||
def is_external(self):
|
||||
return self.data.get("External", False)
|
||||
|
||||
def _get_physical_disks(self):
|
||||
lines = ssacli("ctrl slot={} pd all show detail".format(self.data["Slot"]))
|
||||
pdrives = _parse_pd_output(lines)
|
||||
ret = {}
|
||||
|
||||
for name, attrs in pdrives.items():
|
||||
array = attrs.get("Array", "")
|
||||
model = attrs.get("Model", "").strip()
|
||||
vendor = None
|
||||
if model.startswith("HP"):
|
||||
vendor = "HP"
|
||||
elif len(model.split()) > 1:
|
||||
vendor = get_vendor(model.split()[1])
|
||||
else:
|
||||
vendor = get_vendor(model)
|
||||
|
||||
ret[name] = {
|
||||
"Array": array,
|
||||
"Model": model,
|
||||
"Vendor": vendor,
|
||||
"SN": attrs.get("Serial Number", "").strip(),
|
||||
"Size": attrs.get("Size", "").strip(),
|
||||
"Type": (
|
||||
"SSD"
|
||||
if attrs.get("Interface Type") == "Solid State SATA"
|
||||
else "HDD"
|
||||
),
|
||||
"_src": self.__class__.__name__,
|
||||
"custom_fields": {
|
||||
"pd_identifier": name,
|
||||
"mount_point": attrs.get("Mount Points", "").strip(),
|
||||
"vd_device": attrs.get("Disk Name", "").strip(),
|
||||
"vd_size": attrs.get("Size", "").strip(),
|
||||
},
|
||||
}
|
||||
return ret
|
||||
|
||||
def _get_logical_drives(self):
|
||||
lines = ssacli("ctrl slot={} ld all show detail".format(self.data["Slot"]))
|
||||
ldrives = _parse_ld_output(lines)
|
||||
ret = {}
|
||||
|
||||
for array, attrs in ldrives.items():
|
||||
ret[array] = {
|
||||
"vd_array": array,
|
||||
"vd_size": attrs.get("Size", "").strip(),
|
||||
"vd_consistency": attrs.get("Status", "").strip(),
|
||||
"vd_raid_type": "RAID {}".format(
|
||||
attrs.get("Fault Tolerance", "N/A").strip()
|
||||
),
|
||||
"vd_device": attrs.get("LogicalDrive", "").strip(),
|
||||
"mount_point": attrs.get("Mount Points", "").strip(),
|
||||
}
|
||||
return ret
|
||||
|
||||
def _get_virtual_drives_map(self):
|
||||
for name, attrs in self.pdrives.items():
|
||||
array = attrs["Array"]
|
||||
ld = self.ldrives.get(array)
|
||||
if ld is None:
|
||||
logging.error(
|
||||
"Failed to find array information for physical drive {}."
|
||||
" Ignoring.".format(name)
|
||||
)
|
||||
continue
|
||||
attrs["custom_fields"].update(ld)
|
||||
|
||||
def get_physical_disks(self):
|
||||
ret = []
|
||||
output = subprocess.getoutput(
|
||||
'ssacli ctrl slot={slot} pd all show detail'.format(slot=self.data['Slot'])
|
||||
)
|
||||
lines = output.split('\n')
|
||||
lines = list(filter(None, lines))
|
||||
j = -1
|
||||
while j < len(lines):
|
||||
info_dict, j = _get_dict(lines, j + 1, 0)
|
||||
|
||||
key = next(iter(info_dict))
|
||||
for array, physical_disk in info_dict[key].items():
|
||||
for _, pd_attr in physical_disk.items():
|
||||
model = pd_attr.get('Model', '').strip()
|
||||
vendor = None
|
||||
if model.startswith('HP'):
|
||||
vendor = 'HP'
|
||||
elif len(model.split()) > 1:
|
||||
vendor = get_vendor(model.split()[1])
|
||||
else:
|
||||
vendor = get_vendor(model)
|
||||
|
||||
ret.append({
|
||||
'Model': model,
|
||||
'Vendor': vendor,
|
||||
'SN': pd_attr.get('Serial Number', '').strip(),
|
||||
'Size': pd_attr.get('Size', '').strip(),
|
||||
'Type': 'SSD' if pd_attr.get('Interface Type') == 'Solid State SATA'
|
||||
else 'HDD',
|
||||
'_src': self.__class__.__name__,
|
||||
})
|
||||
return ret
|
||||
return list(self.pdrives.values())
|
||||
|
||||
|
||||
class HPRaid(Raid):
|
||||
def __init__(self):
|
||||
self.output = subprocess.getoutput('ssacli ctrl all show detail')
|
||||
self.output = subprocess.getoutput("ssacli ctrl all show detail")
|
||||
self.controllers = []
|
||||
self.convert_to_dict()
|
||||
|
||||
def convert_to_dict(self):
|
||||
lines = self.output.split('\n')
|
||||
lines = self.output.split("\n")
|
||||
lines = list(filter(None, lines))
|
||||
j = -1
|
||||
while j < len(lines):
|
||||
info_dict, j = _get_dict(lines, j + 1, 0)
|
||||
if len(info_dict.keys()):
|
||||
_product_name = list(info_dict.keys())[0]
|
||||
product_name = REGEXP_CONTROLLER_HP.search(_product_name)
|
||||
if product_name:
|
||||
self.controllers.append(
|
||||
HPRaidController(product_name.group(1), info_dict[_product_name])
|
||||
)
|
||||
controllers = _parse_ctrl_output(lines)
|
||||
for controller, attrs in controllers.items():
|
||||
self.controllers.append(HPRaidController(controller, attrs))
|
||||
|
||||
def get_controllers(self):
|
||||
return self.controllers
|
||||
|
|
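A short usage sketch of the rewritten HP RAID classes, assuming the host has an HP Smart Array controller, the ssacli tool is installed, and the module lives at netbox_agent.raid.hp (the import path is an assumption):

    from netbox_agent.raid.hp import HPRaid

    raid = HPRaid()
    for ctrl in raid.get_controllers():
        print(ctrl.get_product_name(), ctrl.get_serial_number(), ctrl.is_external())
        for disk in ctrl.get_physical_disks():
            cf = disk["custom_fields"]
            print(" ", disk["SN"], disk["Size"], disk["Type"], cf["pd_identifier"])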
|
@ -1,25 +1,43 @@
|
|||
import logging
|
||||
import re
|
||||
import subprocess
|
||||
import xml.etree.ElementTree as ET # NOQA
|
||||
|
||||
from netbox_agent.misc import get_vendor
|
||||
from netbox_agent.misc import get_mount_points, get_vendor
|
||||
from netbox_agent.raid.base import Raid, RaidController
|
||||
|
||||
# Inspiration from https://github.com/asciiphil/perc-status/blob/master/perc-status
|
||||
|
||||
class OmreportControllerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def get_field(obj, fieldname):
|
||||
f = obj.find(fieldname)
|
||||
if f is None:
|
||||
return None
|
||||
if f.attrib['type'] in ['u32', 'u64']:
|
||||
if re.search('Mask$', fieldname):
|
||||
return int(f.text, 2)
|
||||
else:
|
||||
return int(f.text)
|
||||
if f.attrib['type'] == 'astring':
|
||||
return f.text
|
||||
return f.text
|
||||
def omreport(sub_command):
|
||||
command = ["omreport"]
|
||||
command.extend(sub_command.split())
|
||||
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
p.wait()
|
||||
stdout = p.stdout.read().decode("utf-8")
|
||||
if p.returncode != 0:
|
||||
mesg = "Failed to execute command '{}':\n{}".format(" ".join(command), stdout)
|
||||
raise OmreportControllerError(mesg)
|
||||
|
||||
res = {}
|
||||
section_re = re.compile("^[A-Z]")
|
||||
current_section = None
|
||||
current_obj = None
|
||||
|
||||
for line in stdout.split("\n"):
|
||||
if ": " in line:
|
||||
attr, value = line.split(": ", 1)
|
||||
attr = attr.strip()
|
||||
value = value.strip()
|
||||
if attr == "ID":
|
||||
obj = {}
|
||||
res.setdefault(current_section, []).append(obj)
|
||||
current_obj = obj
|
||||
current_obj[attr] = value
|
||||
elif section_re.search(line) is not None:
|
||||
current_section = line.strip()
|
||||
return res
|
||||
|
||||
|
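The omreport() wrapper above groups the tool's plain-text output into sections: a bare capitalised line starts a section, an "ID" attribute starts a new object inside it, and every other "key : value" line is attached to the current object. A sketch of the resulting shape, with hypothetical values, as consumed by OmreportRaid and OmreportController below:

    # Hypothetical result shape of omreport("storage controller").
    expected_shape = {
        "Controller": [
            {"ID": "0", "Name": "Hypothetical PERC", "Status": "Ok"},
        ],
    }
    for controller in expected_shape["Controller"]:
        print(controller["ID"], controller["Name"])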
||||
class OmreportController(RaidController):
|
||||
|
@ -28,50 +46,82 @@ class OmreportController(RaidController):
|
|||
self.controller_index = controller_index
|
||||
|
||||
def get_product_name(self):
|
||||
return get_field(self.data, 'Name')
|
||||
return self.data["Name"]
|
||||
|
||||
def get_manufacturer(self):
|
||||
return None
|
||||
|
||||
def get_serial_number(self):
|
||||
return get_field(self.data, 'DeviceSerialNumber')
|
||||
return self.data.get("DeviceSerialNumber")
|
||||
|
||||
def get_firmware_version(self):
|
||||
return get_field(self.data, 'Firmware Version')
|
||||
return self.data.get("Firmware Version")
|
||||
|
||||
def _get_physical_disks(self):
|
||||
pds = {}
|
||||
res = omreport("storage pdisk controller={}".format(self.controller_index))
|
||||
for pdisk in [d for d in list(res.values())[0]]:
|
||||
disk_id = pdisk["ID"]
|
||||
size = re.sub("B .*$", "B", pdisk["Capacity"])
|
||||
pds[disk_id] = {
|
||||
"Vendor": get_vendor(pdisk["Vendor ID"]),
|
||||
"Model": pdisk["Product ID"],
|
||||
"SN": pdisk["Serial No."],
|
||||
"Size": size,
|
||||
"Type": pdisk["Media"],
|
||||
"_src": self.__class__.__name__,
|
||||
}
|
||||
return pds
|
||||
|
||||
def _get_virtual_drives_map(self):
|
||||
pds = {}
|
||||
res = omreport("storage vdisk controller={}".format(self.controller_index))
|
||||
for vdisk in [d for d in list(res.values())[0]]:
|
||||
vdisk_id = vdisk["ID"]
|
||||
device = vdisk["Device Name"]
|
||||
mount_points = get_mount_points()
|
||||
mp = mount_points.get(device, "n/a")
|
||||
size = re.sub("B .*$", "B", vdisk["Size"])
|
||||
vd = {
|
||||
"vd_array": vdisk_id,
|
||||
"vd_size": size,
|
||||
"vd_consistency": vdisk["State"],
|
||||
"vd_raid_type": vdisk["Layout"],
|
||||
"vd_device": vdisk["Device Name"],
|
||||
"mount_point": ", ".join(sorted(mp)),
|
||||
}
|
||||
drives_res = omreport(
|
||||
"storage pdisk controller={} vdisk={}".format(
|
||||
self.controller_index, vdisk_id
|
||||
)
|
||||
)
|
||||
for pdisk in [d for d in list(drives_res.values())[0]]:
|
||||
pds[pdisk["ID"]] = vd
|
||||
return pds
|
||||
|
||||
def get_physical_disks(self):
|
||||
ret = []
|
||||
output = subprocess.getoutput(
|
||||
'omreport storage controller controller={} -fmt xml'.format(self.controller_index)
|
||||
)
|
||||
root = ET.fromstring(output)
|
||||
et_array_disks = root.find('ArrayDisks')
|
||||
if et_array_disks is not None:
|
||||
for obj in et_array_disks.findall('DCStorageObject'):
|
||||
ret.append({
|
||||
'Vendor': get_vendor(get_field(obj, 'Vendor')),
|
||||
'Model': get_field(obj, 'ProductID'),
|
||||
'SN': get_field(obj, 'DeviceSerialNumber'),
|
||||
'Size': '{:.0f}GB'.format(
|
||||
int(get_field(obj, 'Length')) / 1024 / 1024 / 1024
|
||||
),
|
||||
'Type': 'HDD' if int(get_field(obj, 'MediaType')) == 1 else 'SSD',
|
||||
'_src': self.__class__.__name__,
|
||||
})
|
||||
return ret
|
||||
pds = self._get_physical_disks()
|
||||
vds = self._get_virtual_drives_map()
|
||||
for pd_identifier, vd in vds.items():
|
||||
if pd_identifier not in pds:
|
||||
logging.error(
|
||||
"Physical drive {} listed in virtual drive {} not "
|
||||
"found in drives list".format(pd_identifier, vd["vd_array"])
|
||||
)
|
||||
continue
|
||||
pds[pd_identifier].setdefault("custom_fields", {}).update(vd)
|
||||
pds[pd_identifier]["custom_fields"]["pd_identifier"] = pd_identifier
|
||||
return list(pds.values())
|
||||
|
||||
|
||||
class OmreportRaid(Raid):
|
||||
def __init__(self):
|
||||
output = subprocess.getoutput('omreport storage controller -fmt xml')
|
||||
controller_xml = ET.fromstring(output)
|
||||
self.controllers = []
|
||||
res = omreport("storage controller")
|
||||
|
||||
for obj in controller_xml.find('Controllers').findall('DCStorageObject'):
|
||||
ctrl_index = get_field(obj, 'ControllerNum')
|
||||
self.controllers.append(
|
||||
OmreportController(ctrl_index, obj)
|
||||
)
|
||||
for controller in res["Controller"]:
|
||||
ctrl_index = controller["ID"]
|
||||
self.controllers.append(OmreportController(ctrl_index, controller))
|
||||
|
||||
def get_controllers(self):
|
||||
return self.controllers
|
||||
|
|
|
@ -1,73 +1,153 @@
|
|||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
from netbox_agent.misc import get_vendor
|
||||
from netbox_agent.config import config
|
||||
from netbox_agent.misc import get_mount_points, get_vendor
|
||||
from netbox_agent.raid.base import Raid, RaidController
|
||||
|
||||
|
||||
class StorcliControllerError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def storecli(sub_command):
|
||||
command = ["storcli"]
|
||||
command.extend(sub_command.split())
|
||||
command.append("J")
|
||||
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
|
||||
stdout, stderr = p.communicate()
|
||||
if stderr:
|
||||
mesg = "Failed to execute command '{}':\n{}".format(" ".join(command), stdout)
|
||||
raise StorcliControllerError(mesg)
|
||||
|
||||
stdout = stdout.decode("utf-8")
|
||||
data = json.loads(stdout)
|
||||
|
||||
controllers = dict(
|
||||
[
|
||||
(c["Command Status"]["Controller"], c["Response Data"])
|
||||
for c in data["Controllers"]
|
||||
if c["Command Status"]["Status"] == "Success"
|
||||
]
|
||||
)
|
||||
if not controllers:
|
||||
logging.error(
|
||||
"Failed to execute command '{}'. "
|
||||
"Ignoring data.".format(" ".join(command))
|
||||
)
|
||||
return {}
|
||||
return controllers
|
||||
|
||||
|
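A minimal sketch of how the storecli() helper above is meant to be called, assuming the storcli binary is installed and at least one controller reports success; it returns a dict keyed by controller index, each value being that controller's "Response Data":

    controllers = storecli("/call show")
    for ctrl_id, data in controllers.items():
        print(ctrl_id, data.get("Product Name"), data.get("Serial Number"))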
||||
class StorcliController(RaidController):
|
||||
def __init__(self, controller_index, data):
|
||||
self.data = data
|
||||
self.controller_index = controller_index
|
||||
|
||||
def get_product_name(self):
|
||||
return self.data['Product Name']
|
||||
return self.data["Product Name"]
|
||||
|
||||
def get_manufacturer(self):
|
||||
return None
|
||||
|
||||
def get_serial_number(self):
|
||||
return self.data['Serial Number']
|
||||
return self.data["Serial Number"]
|
||||
|
||||
def get_firmware_version(self):
|
||||
return self.data['FW Package Build']
|
||||
return self.data["FW Package Build"]
|
||||
|
||||
def _get_physical_disks(self):
|
||||
pds = {}
|
||||
cmd = "/c{}/eall/sall show all".format(self.controller_index)
|
||||
controllers = storecli(cmd)
|
||||
pd_info = controllers[self.controller_index]
|
||||
pd_re = re.compile(r"^Drive (/c\d+/e\d+/s\d+)$")
|
||||
|
||||
for section, attrs in pd_info.items():
|
||||
reg = pd_re.search(section)
|
||||
if reg is None:
|
||||
continue
|
||||
pd_name = reg.group(1)
|
||||
pd_attr = attrs[0]
|
||||
pd_identifier = pd_attr["EID:Slt"]
|
||||
size = pd_attr.get("Size", "").strip()
|
||||
media_type = pd_attr.get("Med", "").strip()
|
||||
pd_details = pd_info["{} - Detailed Information".format(section)]
|
||||
pd_dev_attr = pd_details["{} Device attributes".format(section)]
|
||||
model = pd_dev_attr.get("Model Number", "").strip()
|
||||
pd = {
|
||||
"Model": model,
|
||||
"Vendor": get_vendor(model),
|
||||
"SN": pd_dev_attr.get("SN", "").strip(),
|
||||
"Size": size,
|
||||
"Type": media_type,
|
||||
"_src": self.__class__.__name__,
|
||||
}
|
||||
if config.process_virtual_drives:
|
||||
pd.setdefault("custom_fields", {})["pd_identifier"] = pd_name
|
||||
pds[pd_identifier] = pd
|
||||
return pds
|
||||
|
||||
def _get_virtual_drives_map(self):
|
||||
vds = {}
|
||||
cmd = "/c{}/vall show all".format(self.controller_index)
|
||||
controllers = storecli(cmd)
|
||||
vd_info = controllers[self.controller_index]
|
||||
mount_points = get_mount_points()
|
||||
|
||||
for vd_identifier, vd_attrs in vd_info.items():
|
||||
if not vd_identifier.startswith("/c{}/v".format(self.controller_index)):
|
||||
continue
|
||||
volume = vd_identifier.split("/")[-1].lstrip("v")
|
||||
vd_attr = vd_attrs[0]
|
||||
vd_pd_identifier = "PDs for VD {}".format(volume)
|
||||
vd_pds = vd_info[vd_pd_identifier]
|
||||
vd_prop_identifier = "VD{} Properties".format(volume)
|
||||
vd_properties = vd_info[vd_prop_identifier]
|
||||
for pd in vd_pds:
|
||||
pd_identifier = pd["EID:Slt"]
|
||||
wwn = vd_properties["SCSI NAA Id"]
|
||||
wwn_path = "/dev/disk/by-id/wwn-0x{}".format(wwn)
|
||||
device = os.path.realpath(wwn_path)
|
||||
mp = mount_points.get(device, "n/a")
|
||||
vds[pd_identifier] = {
|
||||
"vd_array": vd_identifier,
|
||||
"vd_size": vd_attr["Size"],
|
||||
"vd_consistency": vd_attr["Consist"],
|
||||
"vd_raid_type": vd_attr["TYPE"],
|
||||
"vd_device": device,
|
||||
"mount_point": ", ".join(sorted(mp)),
|
||||
}
|
||||
return vds
|
||||
|
||||
def get_physical_disks(self):
|
||||
ret = []
|
||||
output = subprocess.getoutput(
|
||||
'storcli /c{}/eall/sall show all J'.format(self.controller_index)
|
||||
)
|
||||
drive_infos = json.loads(output)['Controllers'][self.controller_index]['Response Data']
|
||||
# Parses physical disks information
|
||||
pds = self._get_physical_disks()
|
||||
|
||||
for physical_drive in self.data['PD LIST']:
|
||||
enclosure = physical_drive.get('EID:Slt').split(':')[0]
|
||||
slot = physical_drive.get('EID:Slt').split(':')[1]
|
||||
size = physical_drive.get('Size').strip()
|
||||
media_type = physical_drive.get('Med').strip()
|
||||
drive_identifier = 'Drive /c{}/e{}/s{}'.format(
|
||||
str(self.controller_index), str(enclosure), str(slot)
|
||||
)
|
||||
drive_attr = drive_infos['{} - Detailed Information'.format(drive_identifier)][
|
||||
'{} Device attributes'.format(drive_identifier)]
|
||||
model = drive_attr.get('Model Number', '').strip()
|
||||
ret.append({
|
||||
'Model': model,
|
||||
'Vendor': get_vendor(model),
|
||||
'SN': drive_attr.get('SN', '').strip(),
|
||||
'Size': size,
|
||||
'Type': media_type,
|
||||
'_src': self.__class__.__name__,
|
||||
})
|
||||
return ret
|
||||
# Parses virtual drives information and maps them to physical disks
|
||||
vds = self._get_virtual_drives_map()
|
||||
for pd_identifier, vd in vds.items():
|
||||
if pd_identifier not in pds:
|
||||
logging.error(
|
||||
"Physical drive {} listed in virtual drive {} not "
|
||||
"found in drives list".format(pd_identifier, vd["vd_array"])
|
||||
)
|
||||
continue
|
||||
pds[pd_identifier].setdefault("custom_fields", {}).update(vd)
|
||||
|
||||
return list(pds.values())
|
||||
|
||||
|
||||
class StorcliRaid(Raid):
|
||||
def __init__(self):
|
||||
self.output = subprocess.getoutput('storcli /call show J')
|
||||
self.data = json.loads(self.output)
|
||||
self.controllers = []
|
||||
|
||||
if len([
|
||||
x for x in self.data['Controllers']
|
||||
if x['Command Status']['Status'] == 'Success'
|
||||
]) > 0:
|
||||
for controller in self.data['Controllers']:
|
||||
self.controllers.append(
|
||||
StorcliController(
|
||||
controller['Command Status']['Controller'],
|
||||
controller['Response Data']
|
||||
)
|
||||
)
|
||||
controllers = storecli("/call show")
|
||||
for controller_id, controller_data in controllers.items():
|
||||
self.controllers.append(StorcliController(controller_id, controller_data))
|
||||
|
||||
def get_controllers(self):
|
||||
return self.controllers
|
||||
|
|
|
@ -9,28 +9,49 @@ from netbox_agent.config import config
|
|||
from netbox_agent.config import netbox_instance as nb
|
||||
from netbox_agent.inventory import Inventory
|
||||
from netbox_agent.location import Datacenter, Rack, Tenant
|
||||
from netbox_agent.misc import create_netbox_tags, get_device_role, get_device_type
|
||||
from netbox_agent.misc import (
|
||||
create_netbox_tags,
|
||||
get_device_platform,
|
||||
get_device_role,
|
||||
get_device_type,
|
||||
)
|
||||
from netbox_agent.network import ServerNetwork
|
||||
from netbox_agent.power import PowerSupply
|
||||
|
||||
|
||||
class ServerBase():
|
||||
class ServerBase:
|
||||
def __init__(self, dmi=None):
|
||||
if dmi:
|
||||
self.dmi = dmi
|
||||
else:
|
||||
self.dmi = dmidecode.parse()
|
||||
|
||||
self.baseboard = dmidecode.get_by_type(self.dmi, 'Baseboard')
|
||||
self.bios = dmidecode.get_by_type(self.dmi, 'BIOS')
|
||||
self.chassis = dmidecode.get_by_type(self.dmi, 'Chassis')
|
||||
self.system = dmidecode.get_by_type(self.dmi, 'System')
|
||||
self.baseboard = dmidecode.get_by_type(self.dmi, "Baseboard")
|
||||
self.bios = dmidecode.get_by_type(self.dmi, "BIOS")
|
||||
self.chassis = dmidecode.get_by_type(self.dmi, "Chassis")
|
||||
self.system = dmidecode.get_by_type(self.dmi, "System")
|
||||
self.device_platform = get_device_platform(config.device.platform)
|
||||
|
||||
self.network = None
|
||||
|
||||
self.tags = list(set(config.device.tags.split(','))) if config.device.tags else []
|
||||
if self.tags and len(self.tags):
|
||||
create_netbox_tags(self.tags)
|
||||
self.tags = (
|
||||
list(set([x.strip() for x in config.device.tags.split(",") if x.strip()]))
|
||||
if config.device.tags
|
||||
else []
|
||||
)
|
||||
self.nb_tags = list(create_netbox_tags(self.tags))
|
||||
config_cf = set(
|
||||
[f.strip() for f in config.device.custom_fields.split(",") if f.strip()]
|
||||
)
|
||||
self.custom_fields = {}
|
||||
self.custom_fields.update(
|
||||
dict(
|
||||
[
|
||||
(k.strip(), v.strip())
|
||||
for k, v in [f.split("=", 1) for f in config_cf]
|
||||
]
|
||||
)
|
||||
)
|
||||
|
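The constructor above turns the comma-separated config.device.tags and config.device.custom_fields strings into a deduplicated tag list and a dict. A small worked example with hypothetical config values:

    # Hypothetical config strings, only to show how the parsing behaves.
    tags_cfg = "prod, bare-metal ,prod"
    cf_cfg = "bios_version=2.4, contract=infra"

    tags = list(set(x.strip() for x in tags_cfg.split(",") if x.strip()))
    custom_fields = dict(
        (k.strip(), v.strip())
        for k, v in (f.split("=", 1) for f in cf_cfg.split(",") if f.strip())
    )
    print(sorted(tags))    # ['bare-metal', 'prod']
    print(custom_fields)   # {'bios_version': '2.4', 'contract': 'infra'}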
||||
def get_tenant(self):
|
||||
tenant = Tenant()
|
||||
|
@ -40,9 +61,7 @@ class ServerBase():
|
|||
tenant = self.get_tenant()
|
||||
if tenant is None:
|
||||
return None
|
||||
nb_tenant = nb.tenancy.tenants.get(
|
||||
slug=self.get_tenant()
|
||||
)
|
||||
nb_tenant = nb.tenancy.tenants.get(slug=self.get_tenant())
|
||||
return nb_tenant
|
||||
|
||||
def get_datacenter(self):
|
||||
|
@ -52,7 +71,7 @@ class ServerBase():
|
|||
def get_netbox_datacenter(self):
|
||||
dc = self.get_datacenter()
|
||||
if dc is None:
|
||||
logging.error("Specificing a datacenter (Site) is mandatory in Netbox")
|
||||
logging.error("Specifying a datacenter (Site) is mandatory in Netbox")
|
||||
sys.exit(1)
|
||||
|
||||
nb_dc = nb.dcim.sites.get(
|
||||
|
@ -66,24 +85,27 @@ class ServerBase():
|
|||
|
||||
def update_netbox_location(self, server):
|
||||
dc = self.get_datacenter()
|
||||
rack = self.get_rack()
|
||||
nb_rack = self.get_netbox_rack()
|
||||
nb_dc = self.get_netbox_datacenter()
|
||||
|
||||
update = False
|
||||
if dc and server.site and server.site.slug != nb_dc.slug:
|
||||
logging.info('Datacenter location has changed from {} to {}, updating'.format(
|
||||
server.site.slug,
|
||||
nb_dc.slug,
|
||||
))
|
||||
logging.info(
|
||||
"Datacenter location has changed from {} to {}, updating".format(
|
||||
server.site.slug,
|
||||
nb_dc.slug,
|
||||
)
|
||||
)
|
||||
update = True
|
||||
server.site = nb_dc.id
|
||||
|
||||
if rack and server.rack and server.rack.id != nb_rack.id:
|
||||
logging.info('Rack location has changed from {} to {}, updating'.format(
|
||||
server.rack,
|
||||
nb_rack,
|
||||
))
|
||||
if server.rack and nb_rack and server.rack.id != nb_rack.id:
|
||||
logging.info(
|
||||
"Rack location has changed from {} to {}, updating".format(
|
||||
server.rack,
|
||||
nb_rack,
|
||||
)
|
||||
)
|
||||
update = True
|
||||
server.rack = nb_rack
|
||||
if nb_rack is None:
|
||||
|
@ -91,6 +113,19 @@ class ServerBase():
|
|||
server.position = None
|
||||
return update, server
|
||||
|
||||
def update_netbox_expansion_location(self, server, expansion):
|
||||
update = False
|
||||
if expansion.tenant != server.tenant:
|
||||
expansion.tenant = server.tenant
|
||||
update = True
|
||||
if expansion.site != server.site:
|
||||
expansion.site = server.site
|
||||
update = True
|
||||
if expansion.rack != server.rack:
|
||||
expansion.rack = server.rack
|
||||
update = True
|
||||
return update
|
||||
|
||||
def get_rack(self):
|
||||
rack = Rack()
|
||||
return rack.get()
|
||||
|
@ -113,17 +148,24 @@ class ServerBase():
|
|||
"""
|
||||
Return the Chassis Name from dmidecode info
|
||||
"""
|
||||
return self.system[0]['Product Name'].strip()
|
||||
return self.system[0]["Product Name"].strip()
|
||||
|
||||
def get_service_tag(self):
|
||||
"""
|
||||
Return the Service Tag from dmidecode info
|
||||
"""
|
||||
return self.system[0]['Serial Number'].strip()
|
||||
return self.system[0]["Serial Number"].strip()
|
||||
|
||||
def get_expansion_service_tag(self):
|
||||
"""
|
||||
Return a virtual Service Tag for the expansion device: the host's
Serial Number from dmidecode with ' expansion' appended
|
||||
"""
|
||||
return self.system[0]["Serial Number"].strip() + " expansion"
|
||||
|
||||
def get_hostname(self):
|
||||
if config.hostname_cmd is None:
|
||||
return '{}'.format(socket.gethostname())
|
||||
return "{}".format(socket.gethostname())
|
||||
return subprocess.getoutput(config.hostname_cmd)
|
||||
|
||||
def is_blade(self):
|
||||
|
@ -153,21 +195,24 @@ class ServerBase():
|
|||
def get_power_consumption(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def get_expansion_product(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def _netbox_create_chassis(self, datacenter, tenant, rack):
|
||||
device_type = get_device_type(self.get_chassis())
|
||||
device_role = get_device_role(config.device.chassis_role)
|
||||
serial = self.get_chassis_service_tag()
|
||||
logging.info('Creating chassis blade (serial: {serial})'.format(
|
||||
serial=serial))
|
||||
logging.info("Creating chassis blade (serial: {serial})".format(serial=serial))
|
||||
new_chassis = nb.dcim.devices.create(
|
||||
name=self.get_chassis_name(),
|
||||
device_type=device_type.id,
|
||||
serial=serial,
|
||||
device_role=device_role.id,
|
||||
role=device_role.id,
|
||||
site=datacenter.id if datacenter else None,
|
||||
tenant=tenant.id if tenant else None,
|
||||
rack=rack.id if rack else None,
|
||||
tags=self.tags,
|
||||
tags=[{"name": x} for x in self.tags],
|
||||
custom_fields=self.custom_fields,
|
||||
)
|
||||
return new_chassis
|
||||
|
||||
|
@ -177,22 +222,54 @@ class ServerBase():
|
|||
serial = self.get_service_tag()
|
||||
hostname = self.get_hostname()
|
||||
logging.info(
|
||||
'Creating blade (serial: {serial}) {hostname} on chassis {chassis_serial}'.format(
|
||||
"Creating blade (serial: {serial}) {hostname} on chassis {chassis_serial}".format(
|
||||
serial=serial, hostname=hostname, chassis_serial=chassis.serial
|
||||
))
|
||||
)
|
||||
)
|
||||
new_blade = nb.dcim.devices.create(
|
||||
name=hostname,
|
||||
serial=serial,
|
||||
device_role=device_role.id,
|
||||
role=device_role.id,
|
||||
device_type=device_type.id,
|
||||
parent_device=chassis.id,
|
||||
site=datacenter.id if datacenter else None,
|
||||
tenant=tenant.id if tenant else None,
|
||||
rack=rack.id if rack else None,
|
||||
tags=self.tags,
|
||||
tags=[{"name": x} for x in self.tags],
|
||||
custom_fields=self.custom_fields,
|
||||
)
|
||||
return new_blade
|
||||
|
||||
def _netbox_create_blade_expansion(self, chassis, datacenter, tenant, rack):
|
||||
device_role = get_device_role(config.device.blade_role)
|
||||
device_type = get_device_type(self.get_expansion_product())
|
||||
serial = self.get_expansion_service_tag()
|
||||
hostname = self.get_hostname() + " expansion"
|
||||
logging.info(
|
||||
"Creating expansion (serial: {serial}) {hostname} on chassis {chassis_serial}".format(
|
||||
serial=serial, hostname=hostname, chassis_serial=chassis.serial
|
||||
)
|
||||
)
|
||||
new_blade = nb.dcim.devices.create(
|
||||
name=hostname,
|
||||
serial=serial,
|
||||
role=device_role.id,
|
||||
device_type=device_type.id,
|
||||
parent_device=chassis.id,
|
||||
site=datacenter.id if datacenter else None,
|
||||
tenant=tenant.id if tenant else None,
|
||||
rack=rack.id if rack else None,
|
||||
tags=[{"name": x} for x in self.tags],
|
||||
)
|
||||
return new_blade
|
||||
|
||||
def _netbox_deduplicate_server(self):
|
||||
serial = self.get_service_tag()
|
||||
hostname = self.get_hostname()
|
||||
server = nb.dcim.devices.get(name=hostname)
|
||||
if server and server.serial != serial:
|
||||
server.delete()
|
||||
|
||||
def _netbox_create_server(self, datacenter, tenant, rack):
|
||||
device_role = get_device_role(config.device.server_role)
|
||||
device_type = get_device_type(self.get_product_name())
|
||||
|
@ -200,55 +277,117 @@ class ServerBase():
|
|||
raise Exception('Chassis "{}" doesn\'t exist'.format(self.get_chassis()))
|
||||
serial = self.get_service_tag()
|
||||
hostname = self.get_hostname()
|
||||
logging.info('Creating server (serial: {serial}) {hostname}'.format(
|
||||
serial=serial, hostname=hostname))
|
||||
logging.info(
|
||||
"Creating server (serial: {serial}) {hostname}".format(
|
||||
serial=serial, hostname=hostname
|
||||
)
|
||||
)
|
||||
new_server = nb.dcim.devices.create(
|
||||
name=hostname,
|
||||
serial=serial,
|
||||
device_role=device_role.id,
|
||||
role=device_role.id,
|
||||
device_type=device_type.id,
|
||||
platform=self.device_platform.id,
|
||||
site=datacenter.id if datacenter else None,
|
||||
tenant=tenant.id if tenant else None,
|
||||
rack=rack.id if rack else None,
|
||||
tags=self.tags,
|
||||
tags=[{"name": x} for x in self.tags],
|
||||
)
|
||||
return new_server
|
||||
|
||||
def get_netbox_server(self):
|
||||
return nb.dcim.devices.get(serial=self.get_service_tag())
|
||||
def get_netbox_server(self, expansion=False):
|
||||
if expansion is False:
|
||||
return nb.dcim.devices.get(serial=self.get_service_tag())
|
||||
else:
|
||||
return nb.dcim.devices.get(serial=self.get_expansion_service_tag())
|
||||
|
||||
def _netbox_set_or_update_blade_slot(self, server, chassis, datacenter):
|
||||
# before everything check if right chassis
|
||||
actual_device_bay = server.parent_device.device_bay if server.parent_device else None
|
||||
actual_device_bay = (
|
||||
server.parent_device.device_bay if server.parent_device else None
|
||||
)
|
||||
actual_chassis = actual_device_bay.device if actual_device_bay else None
|
||||
slot = self.get_blade_slot()
|
||||
if actual_chassis and \
|
||||
actual_chassis.serial == chassis.serial and \
|
||||
actual_device_bay.name == slot:
|
||||
if (
|
||||
actual_chassis
|
||||
and actual_chassis.serial == chassis.serial
|
||||
and actual_device_bay.name == slot
|
||||
):
|
||||
return
|
||||
|
||||
real_device_bays = nb.dcim.device_bays.filter(
|
||||
device_id=chassis.id,
|
||||
name=slot,
|
||||
)
|
||||
if len(real_device_bays) > 0:
|
||||
real_device_bays = nb.dcim.device_bays.filter(
|
||||
device_id=chassis.id,
|
||||
name=slot,
|
||||
)
|
||||
if real_device_bays:
|
||||
logging.info(
|
||||
'Setting device ({serial}) new slot on {slot} '
|
||||
'(Chassis {chassis_serial})..'.format(
|
||||
"Setting device ({serial}) new slot on {slot} "
|
||||
"(Chassis {chassis_serial})..".format(
|
||||
serial=server.serial, slot=slot, chassis_serial=chassis.serial
|
||||
))
|
||||
)
|
||||
)
|
||||
# reset actual device bay if set
|
||||
if actual_device_bay:
|
||||
# Forces the evaluation of the installed_device attribute to
|
||||
# workaround a bug probably due to lazy loading optimization
|
||||
# that prevents the value change detection
|
||||
actual_device_bay.installed_device
|
||||
actual_device_bay.installed_device = None
|
||||
actual_device_bay.save()
|
||||
# setup new device bay
|
||||
real_device_bay = real_device_bays[0]
|
||||
real_device_bay = next(real_device_bays)
|
||||
real_device_bay.installed_device = server
|
||||
real_device_bay.save()
|
||||
else:
|
||||
logging.error('Could not find slot {slot} for chassis'.format(
|
||||
slot=slot
|
||||
))
|
||||
logging.error("Could not find slot {slot} for chassis".format(slot=slot))
|
||||
|
||||
def _netbox_set_or_update_blade_expansion_slot(
|
||||
self, expansion, chassis, datacenter
|
||||
):
|
||||
# before everything check if right chassis
|
||||
actual_device_bay = (
|
||||
expansion.parent_device.device_bay if expansion.parent_device else None
|
||||
)
|
||||
actual_chassis = actual_device_bay.device if actual_device_bay else None
|
||||
slot = self.get_blade_expansion_slot()
|
||||
if (
|
||||
actual_chassis
|
||||
and actual_chassis.serial == chassis.serial
|
||||
and actual_device_bay.name == slot
|
||||
):
|
||||
return
|
||||
|
||||
real_device_bays = nb.dcim.device_bays.filter(
|
||||
device_id=chassis.id,
|
||||
name=slot,
|
||||
)
|
||||
if not real_device_bays:
|
||||
logging.error(
|
||||
"Could not find slot {slot} expansion for chassis".format(slot=slot)
|
||||
)
|
||||
return
|
||||
logging.info(
|
||||
"Setting device expansion ({serial}) new slot on {slot} "
|
||||
"(Chassis {chassis_serial})..".format(
|
||||
serial=expansion.serial, slot=slot, chassis_serial=chassis.serial
|
||||
)
|
||||
)
|
||||
# reset actual device bay if set
|
||||
if actual_device_bay:
|
||||
# Forces the evaluation of the installed_device attribute to
|
||||
# workaround a bug probably due to lazy loading optimization
|
||||
# that prevents the value change detection
|
||||
actual_device_bay.installed_device
|
||||
actual_device_bay.installed_device = None
|
||||
actual_device_bay.save()
|
||||
# setup new device bay
|
||||
real_device_bay = next(real_device_bays)
|
||||
real_device_bay.installed_device = expansion
|
||||
real_device_bay.save()
|
||||
|
||||
def netbox_create_or_update(self, config):
|
||||
"""
|
||||
|
@ -266,10 +405,11 @@ class ServerBase():
|
|||
rack = self.get_netbox_rack()
|
||||
tenant = self.get_netbox_tenant()
|
||||
|
||||
if config.purge_old_devices:
|
||||
self._netbox_deduplicate_server()
|
||||
|
||||
if self.is_blade():
|
||||
chassis = nb.dcim.devices.get(
|
||||
serial=self.get_chassis_service_tag()
|
||||
)
|
||||
chassis = nb.dcim.devices.get(serial=self.get_chassis_service_tag())
|
||||
# Chassis does not exist
|
||||
if not chassis:
|
||||
chassis = self._netbox_create_chassis(datacenter, tenant, rack)
|
||||
|
@ -285,14 +425,17 @@ class ServerBase():
|
|||
if not server:
|
||||
server = self._netbox_create_server(datacenter, tenant, rack)
|
||||
|
||||
logging.debug('Updating Server...')
|
||||
logging.debug("Updating Server...")
|
||||
# check network cards
|
||||
if config.register or config.update_all or config.update_network:
|
||||
self.network = ServerNetwork(server=self)
|
||||
self.network.create_or_update_netbox_network_cards()
|
||||
update_inventory = config.inventory and (
|
||||
config.register or config.update_all or config.update_inventory
|
||||
)
|
||||
# update inventory if feature is enabled
|
||||
if config.inventory and (config.register or config.update_all or config.update_inventory):
|
||||
self.inventory = Inventory(server=self)
|
||||
self.inventory = Inventory(server=self)
|
||||
if update_inventory:
|
||||
self.inventory.create_or_update()
|
||||
# update psu
|
||||
if config.register or config.update_all or config.update_psu:
|
||||
|
@ -300,36 +443,104 @@ class ServerBase():
|
|||
self.power.create_or_update_power_supply()
|
||||
self.power.report_power_consumption()
|
||||
|
||||
expansion = nb.dcim.devices.get(serial=self.get_expansion_service_tag())
|
||||
if self.own_expansion_slot() and config.expansion_as_device:
|
||||
logging.debug("Update Server expansion...")
|
||||
if not expansion:
|
||||
expansion = self._netbox_create_blade_expansion(
|
||||
chassis, datacenter, tenant, rack
|
||||
)
|
||||
|
||||
# set slot for blade expansion
|
||||
self._netbox_set_or_update_blade_expansion_slot(
|
||||
expansion, chassis, datacenter
|
||||
)
|
||||
if update_inventory:
|
||||
# Updates expansion inventory
|
||||
inventory = Inventory(server=self, update_expansion=True)
|
||||
inventory.create_or_update()
|
||||
elif self.own_expansion_slot() and expansion:
|
||||
expansion.delete()
|
||||
expansion = None
|
||||
|
||||
update = 0
|
||||
# for every other specs
|
||||
# check hostname
|
||||
if server.name != self.get_hostname():
|
||||
update += 1
|
||||
server.name = self.get_hostname()
|
||||
update += 1
|
||||
|
||||
if sorted(set(server.tags)) != sorted(set(self.tags)):
|
||||
server.tags = self.tags
|
||||
server_tags = sorted(set([x.name for x in server.tags]))
|
||||
tags = sorted(set(self.tags))
|
||||
if server_tags != tags:
|
||||
new_tags_ids = [x.id for x in self.nb_tags]
|
||||
if not config.preserve_tags:
|
||||
server.tags = new_tags_ids
|
||||
else:
|
||||
server_tags_ids = [x.id for x in server.tags]
|
||||
server.tags = sorted(set(new_tags_ids + server_tags_ids))
|
||||
update += 1
|
||||
|
||||
if server.custom_fields != self.custom_fields:
|
||||
server.custom_fields = self.custom_fields
|
||||
update += 1
|
||||
|
||||
if config.update_all or config.update_location:
|
||||
ret, server = self.update_netbox_location(server)
|
||||
update += ret
|
||||
|
||||
if server.platform != self.device_platform:
|
||||
server.platform = self.device_platform
|
||||
update += 1
|
||||
|
||||
if update:
|
||||
server.save()
|
||||
logging.debug('Finished updating Server!')
|
||||
|
||||
if expansion:
|
||||
update = 0
|
||||
expansion_name = server.name + " expansion"
|
||||
if expansion.name != expansion_name:
|
||||
expansion.name = expansion_name
|
||||
update += 1
|
||||
if self.update_netbox_expansion_location(server, expansion):
|
||||
update += 1
|
||||
if update:
|
||||
expansion.save()
|
||||
logging.debug("Finished updating Server!")
|
||||
|
||||
def print_debug(self):
|
||||
self.network = ServerNetwork(server=self)
|
||||
print('Datacenter:', self.get_datacenter())
|
||||
print('Netbox Datacenter:', self.get_netbox_datacenter())
|
||||
print('Rack:', self.get_rack())
|
||||
print('Netbox Rack:', self.get_netbox_rack())
|
||||
print('Is blade:', self.is_blade())
|
||||
print('Product Name:', self.get_product_name())
|
||||
print('Chassis:', self.get_chassis())
|
||||
print('Chassis service tag:', self.get_chassis_service_tag())
|
||||
print('Service tag:', self.get_service_tag())
|
||||
print('NIC:',)
|
||||
print("Datacenter:", self.get_datacenter())
|
||||
print("Netbox Datacenter:", self.get_netbox_datacenter())
|
||||
print("Rack:", self.get_rack())
|
||||
print("Netbox Rack:", self.get_netbox_rack())
|
||||
print("Is blade:", self.is_blade())
|
||||
print("Got expansion:", self.own_expansion_slot())
|
||||
print("Product Name:", self.get_product_name())
|
||||
print("Platform:", self.device_platform)
|
||||
print("Chassis:", self.get_chassis())
|
||||
print("Chassis service tag:", self.get_chassis_service_tag())
|
||||
print("Service tag:", self.get_service_tag())
|
||||
print(
|
||||
"NIC:",
|
||||
)
|
||||
pprint(self.network.get_network_cards())
|
||||
pass
|
||||
|
||||
def own_expansion_slot(self):
|
||||
"""
|
||||
Indicates if the device hosts an expansion card
|
||||
"""
|
||||
return False
|
||||
|
||||
def own_gpu_expansion_slot(self):
|
||||
"""
|
||||
Indicates if the device hosts a GPU expansion card
|
||||
"""
|
||||
return False
|
||||
|
||||
def own_drive_expansion_slot(self):
|
||||
"""
|
||||
Indicates if the device hosts a drive expansion bay
|
||||
"""
|
||||
return False
|
||||
|
|
47 netbox_agent/vendors/dell.py (vendored)
|
@ -8,10 +8,10 @@ from netbox_agent.server import ServerBase
|
|||
class DellHost(ServerBase):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(DellHost, self).__init__(*args, **kwargs)
|
||||
self.manufacturer = 'Dell'
|
||||
self.manufacturer = "Dell"
|
||||
|
||||
def is_blade(self):
|
||||
return self.get_product_name().startswith('PowerEdge M')
|
||||
return self.get_product_name().startswith("PowerEdge M")
|
||||
|
||||
def get_blade_slot(self):
|
||||
"""
|
||||
|
@ -20,50 +20,69 @@ class DellHost(ServerBase):
|
|||
` Location In Chassis: Slot 03`
|
||||
"""
|
||||
if self.is_blade():
|
||||
return self.baseboard[0].get('Location In Chassis').strip()
|
||||
return self.baseboard[0].get("Location In Chassis").strip()
|
||||
return None
|
||||
|
||||
def get_chassis_name(self):
|
||||
if not self.is_blade():
|
||||
return None
|
||||
return 'Chassis {}'.format(self.get_service_tag())
|
||||
return "Chassis {}".format(self.get_service_tag())
|
||||
|
||||
def get_chassis(self):
|
||||
if self.is_blade():
|
||||
return self.chassis[0]['Version'].strip()
|
||||
return self.chassis[0]["Version"].strip()
|
||||
return self.get_product_name()
|
||||
|
||||
def get_chassis_service_tag(self):
|
||||
if self.is_blade():
|
||||
return self.chassis[0]['Serial Number'].strip()
|
||||
return self.chassis[0]["Serial Number"].strip()
|
||||
return self.get_service_tag()
|
||||
|
||||
def get_power_consumption(self):
|
||||
'''
|
||||
"""
|
||||
Parse omreport output like this
|
||||
|
||||
Amperage
|
||||
PS1 Current 1 : 1.8 A
|
||||
PS2 Current 2 : 1.4 A
|
||||
'''
|
||||
"""
|
||||
value = []
|
||||
|
||||
if not is_tool('omreport'):
|
||||
logging.error('omreport does not seem to be installed, please debug')
|
||||
if not is_tool("omreport"):
|
||||
logging.error("omreport does not seem to be installed, please debug")
|
||||
return value
|
||||
|
||||
data = subprocess.getoutput('omreport chassis pwrmonitoring')
|
||||
data = subprocess.getoutput("omreport chassis pwrmonitoring")
|
||||
amperage = False
|
||||
for line in data.splitlines():
|
||||
if line.startswith('Amperage'):
|
||||
if line.startswith("Amperage"):
|
||||
amperage = True
|
||||
continue
|
||||
|
||||
if amperage:
|
||||
if line.startswith('PS'):
|
||||
amp_value = line.split(':')[1].split()[0]
|
||||
if line.startswith("PS"):
|
||||
amp_value = line.split(":")[1].split()[0]
|
||||
value.append(amp_value)
|
||||
else:
|
||||
break
|
||||
|
||||
return value
|
||||
|
||||
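get_power_consumption() above walks the "Amperage" block of omreport chassis pwrmonitoring and keeps the number before the unit on every PSx line; applied to the sample in the docstring it yields ['1.8', '1.4'], which report_power_consumption() later multiplies by the feed voltage. A standalone rerun of that parsing, mirroring the method for illustration:

    # Sample lines taken from the docstring above.
    sample = ["Amperage", "PS1 Current 1 : 1.8 A", "PS2 Current 2 : 1.4 A"]
    value, in_amperage = [], False
    for line in sample:
        if line.startswith("Amperage"):
            in_amperage = True
            continue
        if in_amperage:
            if line.startswith("PS"):
                value.append(line.split(":")[1].split()[0])
            else:
                break
    print(value)  # ['1.8', '1.4']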
def get_expansion_product(self):
|
||||
"""
|
||||
Get the expansion slot, which is on an even slot number
next to the compute slot on an odd slot number
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def is_expansion_slot(self, server):
|
||||
"""
|
||||
Return True if it's an extension slot
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def get_blade_expansion_slot(self):
|
||||
"""
|
||||
Expansion slot are always the compute bay number + 1
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
|
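The `get_power_consumption()` docstring above shows the `omreport chassis pwrmonitoring` block that gets parsed. A standalone sketch of that same parsing logic applied to the sample output from the docstring (the helper name `parse_amperage` is illustrative, not part of the diff):

```python
def parse_amperage(data: str) -> list:
    """Pull the per-PSU amperage values out of omreport output."""
    values = []
    amperage = False
    for line in data.splitlines():
        if line.startswith("Amperage"):
            amperage = True
            continue
        if amperage:
            if line.startswith("PS"):
                # e.g. "PS1 Current 1 : 1.8 A" -> "1.8"
                values.append(line.split(":")[1].split()[0])
            else:
                break
    return values


sample = """Amperage
PS1 Current 1                 : 1.8 A
PS2 Current 2                 : 1.4 A"""
print(parse_amperage(sample))  # ['1.8', '1.4']
```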
6 netbox_agent/vendors/generic.py vendored

@@ -5,10 +5,12 @@ from netbox_agent.server import ServerBase
class GenericHost(ServerBase):
    def __init__(self, *args, **kwargs):
        super(GenericHost, self).__init__(*args, **kwargs)
        self.manufacturer = dmidecode.get_by_type(self.dmi, 'Baseboard')[0].get('Manufacturer')
        self.manufacturer = dmidecode.get_by_type(self.dmi, "Baseboard")[0].get(
            "Manufacturer"
        )

    def is_blade(self):
        return None
        return False

    def get_blade_slot(self):
        return None

73 netbox_agent/vendors/hp.py vendored

@@ -1,4 +1,5 @@
import netbox_agent.dmidecode as dmidecode
from netbox_agent.inventory import Inventory
from netbox_agent.server import ServerBase


@@ -11,12 +12,11 @@ class HPHost(ServerBase):
        self.hp_rack_locator = self._find_rack_locator()

    def is_blade(self):
        if self.product.startswith("ProLiant BL"):
            return True
        elif self.product.startswith("ProLiant m") and self.product.endswith("Server Cartridge"):
            return True
        else:
            return False
        blade = self.product.startswith("ProLiant BL")
        blade |= self.product.startswith("ProLiant m") and self.product.endswith(
            "Server Cartridge"
        )
        return blade

    def _find_rack_locator(self):
        """

@@ -27,7 +27,7 @@ class HPHost(ServerBase):
        # FIXME: make a dmidecode function get_by_dminame() ?
        if self.is_blade():
            locator = dmidecode.get_by_type(self.dmi, 204)
            if self.product == "ProLiant BL460c Gen10":
            if self.product.startswith("ProLiant BL460c Gen10"):
                locator = locator[0]["Strings"]
                return {
                    "Enclosure Model": locator[2].strip(),

@@ -37,7 +37,9 @@ class HPHost(ServerBase):
            }

        # HP ProLiant m750, m710x, m510 Server Cartridge
        if self.product.startswith("ProLiant m") and self.product.endswith("Server Cartridge"):
        if self.product.startswith("ProLiant m") and self.product.endswith(
            "Server Cartridge"
        ):
            locator = dmidecode.get_by_type(self.dmi, 2)
            chassis = dmidecode.get_by_type(self.dmi, 3)
            return {

@@ -68,3 +70,58 @@ class HPHost(ServerBase):
        if self.is_blade():
            return self.hp_rack_locator["Enclosure Serial"].strip()
        return self.get_service_tag()

    def get_blade_expansion_slot(self):
        """
        Expansion slots are always the compute bay number + 1
        """
        if (
            self.is_blade()
            and self.own_gpu_expansion_slot()
            or self.own_disk_expansion_slot()
            or True
        ):
            return "Bay {}".format(
                str(int(self.hp_rack_locator["Server Bay"].strip()) + 1)
            )
        return None

    def get_expansion_product(self):
        """
        Get the expansion slot that is on an even slot number,
        next to the compute slot that is on an odd slot number.
        I only know of one model of GPU expansion card for that slot.
        """
        if self.own_gpu_expansion_slot():
            return "ProLiant BL460c Graphics Expansion Blade"
        elif self.own_disk_expansion_slot():
            return "ProLiant BL460c Disk Expansion Blade"
        return None

    def own_expansion_slot(self):
        """
        Indicates if the device hosts an expansion card
        """
        return self.own_gpu_expansion_slot() or self.own_disk_expansion_slot()

    def own_gpu_expansion_slot(self):
        """
        Indicates if the device hosts a GPU expansion card based
        on the product name
        """
        return self.get_product_name().endswith("Graphics Exp")

    def own_disk_expansion_slot(self):
        """
        Indicates if the device hosts a drive expansion card based
        on raid card attributes.
        """
        # Uses the already parsed inventory if available,
        # parses it otherwise.
        inventory = getattr(self, "inventory", None)
        if inventory is None:
            inventory = Inventory(self)
        for raid_card in inventory.get_raid_cards():
            if self.is_blade() and raid_card.is_external():
                return True
        return False

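On HP blades the expansion blade sits in the bay next to the compute node, so `get_blade_expansion_slot()` derives it from the OEM rack locator. A tiny sketch of that bay arithmetic, assuming a rack-locator dict shaped like the one `_find_rack_locator()` builds from DMI type 204 (the helper and sample values below are illustrative):

```python
def expansion_bay(rack_locator: dict) -> str:
    """Compute the expansion bay label: the compute bay number + 1."""
    compute_bay = int(rack_locator["Server Bay"].strip())
    return "Bay {}".format(compute_bay + 1)


# Example locator, shaped like the dict returned by _find_rack_locator().
locator = {"Server Bay": " 3 ", "Enclosure Serial": "4343"}
print(expansion_bay(locator))  # Bay 4
```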
14 netbox_agent/vendors/qct.py vendored

@@ -4,29 +4,29 @@ from netbox_agent.server import ServerBase
class QCTHost(ServerBase):
    def __init__(self, *args, **kwargs):
        super(QCTHost, self).__init__(*args, **kwargs)
        self.manufacturer = 'QCT'
        self.manufacturer = "QCT"

    def is_blade(self):
        return 'Location In Chassis' in self.baseboard[0].keys()
        return "Location In Chassis" in self.baseboard[0].keys()

    def get_blade_slot(self):
        if self.is_blade():
            return 'Slot {}'.format(
                self.baseboard[0].get('Location In Chassis').strip()
            return "Slot {}".format(
                self.baseboard[0].get("Location In Chassis").strip()
            )
        return None

    def get_chassis_name(self):
        if not self.is_blade():
            return None
        return 'Chassis {}'.format(self.get_service_tag())
        return "Chassis {}".format(self.get_service_tag())

    def get_chassis(self):
        if self.is_blade():
            return self.chassis[0]['Version'].strip()
            return self.chassis[0]["Version"].strip()
        return self.get_product_name()

    def get_chassis_service_tag(self):
        if self.is_blade():
            return self.chassis[0]['Serial Number'].strip()
            return self.chassis[0]["Serial Number"].strip()
        return self.get_service_tag()

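For QCT (and Dell) nodes the blade test is simply whether the DMI baseboard record carries a `Location In Chassis` field, which then becomes the slot label. A small sketch with a made-up baseboard record (the helper name and sample dicts are hypothetical):

```python
def blade_slot_from_baseboard(baseboard: dict):
    """Return a 'Slot N' label if the baseboard says it sits in a chassis."""
    location = baseboard.get("Location In Chassis")
    if location is None:
        return None
    return "Slot {}".format(location.strip())


# Hypothetical DMI type 2 (baseboard) records.
print(blade_slot_from_baseboard({"Location In Chassis": " 3 "}))  # Slot 3
print(blade_slot_from_baseboard({"Product Name": "X10E-9N"}))     # None
```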
54 netbox_agent/vendors/supermicro.py vendored

@@ -4,34 +4,36 @@ from netbox_agent.server import ServerBase

class SupermicroHost(ServerBase):
    """
    Supermicro DMI can be messed up. They depend on the vendor
    to set the correct values. The endusers cannot
    change them without buying a license from Supermicro.
    Supermicro DMI can be messed up. They depend on the vendor
    to set the correct values. The endusers cannot
    change them without buying a license from Supermicro.

    There are 3 serial numbers in the system
    There are 3 serial numbers in the system

    1) System - this is used for the chassis information.
    2) Baseboard - this is used for the blade.
    3) Chassis - this is ignored.
    1) System - this is used for the chassis information.
    2) Baseboard - this is used for the blade.
    3) Chassis - this is ignored.

    """

    def __init__(self, *args, **kwargs):
        super(SupermicroHost, self).__init__(*args, **kwargs)
        self.manufacturer = 'Supermicro'
        self.manufacturer = "Supermicro"

    def is_blade(self):
        product_name = self.get_product_name()
        product_name = self.system[0]["Product Name"].strip()
        # Blades
        blade = product_name.startswith('SBI')
        blade |= product_name.startswith('SBA')
        blade = product_name.startswith("SBI")
        blade |= product_name.startswith("SBA")
        # Twin
        blade |= 'TR-' in product_name
        blade |= "TR-" in product_name
        # TwinPro
        blade |= "TP-" in product_name
        # BigTwin
        blade |= 'BT-' in product_name
        blade |= "BT-" in product_name
        # Microcloud
        blade |= product_name.startswith('SYS-5039')
        blade |= product_name.startswith('SYS-5038')
        blade |= product_name.startswith("SYS-5039")
        blade |= product_name.startswith("SYS-5038")
        return blade

    def get_blade_slot(self):

@@ -44,22 +46,34 @@ class SupermicroHost(ServerBase):
        return None

    def get_service_tag(self):
        return self.system[0]['Serial Number'].strip()
        if self.is_blade():
            return self.baseboard[0]["Serial Number"].strip()
        return self.system[0]["Serial Number"].strip()

    def get_product_name(self):
        return self.system[0]['Product Name'].strip()
        if self.is_blade():
            return self.baseboard[0]["Product Name"].strip()
        return self.system[0]["Product Name"].strip()

    def get_chassis(self):
        if self.is_blade():
            return self.chassis[0]['Product Name'].strip()
            return self.system[0]["Product Name"].strip()
        return self.get_product_name()

    def get_chassis_service_tag(self):
        if self.is_blade():
            return self.chassis[0]['Serial Number'].strip()
            return self.system[0]["Serial Number"].strip()
        return self.get_service_tag()

    def get_chassis_name(self):
        if not self.is_blade():
            return None
        return 'Chassis {}'.format(self.get_chassis_service_tag())
        return "Chassis {}".format(self.get_chassis_service_tag())

    def get_expansion_product(self):
        """
        Get the expansion slot that is on an even slot number,
        next to the compute slot that is on an odd slot number.
        I only know of one model of GPU expansion card for that slot.
        """
        raise NotImplementedError

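The Supermicro blade check above is a chain of product-name prefix and substring tests. A small sketch of the same test as a standalone predicate, with two product names from the fixtures used later in this diff as inputs (the function name is illustrative):

```python
def looks_like_supermicro_blade(product_name: str) -> bool:
    """Blade/Twin/Microcloud detection by product-name pattern."""
    blade = product_name.startswith(("SBI", "SBA"))             # blades
    blade |= "TR-" in product_name                              # Twin
    blade |= "TP-" in product_name                              # TwinPro
    blade |= "BT-" in product_name                              # BigTwin
    blade |= product_name.startswith(("SYS-5039", "SYS-5038"))  # Microcloud
    return blade


print(looks_like_supermicro_blade("SYS-5039MS-H12TRF-OS012"))  # True
print(looks_like_supermicro_blade("SYS-6018R-TDTPR"))          # False
```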
@@ -1,26 +1,33 @@
import os
from pprint import pprint

import netbox_agent.dmidecode as dmidecode
from netbox_agent.config import config
from netbox_agent.config import netbox_instance as nb
from netbox_agent.location import Tenant
from netbox_agent.logging import logging  # NOQA
from netbox_agent.misc import create_netbox_tags, get_hostname
from netbox_agent.misc import create_netbox_tags, get_device_platform, get_hostname
from netbox_agent.network import VirtualNetwork


def is_vm(dmi):
    bios = dmidecode.get_by_type(dmi, 'BIOS')
    system = dmidecode.get_by_type(dmi, 'System')
    bios = dmidecode.get_by_type(dmi, "BIOS")[0]
    system = dmidecode.get_by_type(dmi, "System")[0]

    if 'Hyper-V' in bios[0]['Version'] or \
       'Xen' in bios[0]['Version'] or \
       'Google Compute Engine' in system[0]['Product Name'] or \
       'RHEV Hypervisor' in system[0]['Product Name'] or \
       'VirtualBox' in bios[0]['Version'] or \
       'VMware' in system[0]['Manufacturer']:
        return True
    return False
    return (
        "Hyper-V" in bios["Version"]
        or "Xen" in bios["Version"]
        or "Google Compute Engine" in system["Product Name"]
    ) or (
        (
            "Amazon EC2" in system["Manufacturer"]
            and not system["Product Name"].endswith(".metal")
        )
        or "RHEV Hypervisor" in system["Product Name"]
        or "QEMU" in system["Manufacturer"]
        or "VirtualBox" in bios["Version"]
        or "VMware" in system["Manufacturer"]
    )


class VirtualMachine(object):

@@ -30,14 +37,18 @@ class VirtualMachine(object):
        else:
            self.dmi = dmidecode.parse()
        self.network = None
        self.device_platform = get_device_platform(config.device.platform)

        self.tags = list(set(config.device.tags.split(','))) if config.device.tags else []
        if self.tags and len(self.tags):
            create_netbox_tags(self.tags)
        self.tags = (
            list(set(config.device.tags.split(","))) if config.device.tags else []
        )
        self.nb_tags = create_netbox_tags(self.tags)

    def get_memory(self):
        mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')  # e.g. 4015976448
        mem_gib = mem_bytes / (1024.**2)  # e.g. 3.74
        mem_bytes = os.sysconf("SC_PAGE_SIZE") * os.sysconf(
            "SC_PHYS_PAGES"
        )  # e.g. 4015976448
        mem_gib = mem_bytes / (1024.0**2)  # e.g. 3.74
        return int(mem_gib)

    def get_vcpus(self):

@@ -45,9 +56,7 @@ class VirtualMachine(object):

    def get_netbox_vm(self):
        hostname = get_hostname(config)
        vm = nb.virtualization.virtual_machines.get(
            name=hostname
        )
        vm = nb.virtualization.virtual_machines.get(name=hostname)
        return vm

    def get_netbox_cluster(self, name):

@@ -70,13 +79,11 @@ class VirtualMachine(object):
        tenant = self.get_tenant()
        if tenant is None:
            return None
        nb_tenant = nb.tenancy.tenants.get(
            slug=self.get_tenant()
        )
        nb_tenant = nb.tenancy.tenants.get(slug=self.get_tenant())
        return nb_tenant

    def netbox_create_or_update(self, config):
        logging.debug('It\'s a virtual machine')
        logging.debug("It's a virtual machine")
        created = False
        updated = 0


@@ -87,16 +94,17 @@ class VirtualMachine(object):
        memory = self.get_memory()
        tenant = self.get_netbox_tenant()
        if not vm:
            logging.debug('Creating Virtual machine..')
            logging.debug("Creating Virtual machine..")
            cluster = self.get_netbox_cluster(config.virtual.cluster_name)

            vm = nb.virtualization.virtual_machines.create(
                name=hostname,
                cluster=cluster.id,
                platform=self.device_platform.id,
                vcpus=vcpus,
                memory=memory,
                tenant=tenant.id if tenant else None,
                tags=self.tags,
                tags=[{"name": x} for x in self.tags],
            )
            created = True


@@ -110,9 +118,34 @@ class VirtualMachine(object):
        if vm.memory != memory:
            vm.memory = memory
            updated += 1
        if sorted(set(vm.tags)) != sorted(set(self.tags)):
            vm.tags = self.tags

        vm_tags = sorted(set([x.name for x in vm.tags]))
        tags = sorted(set(self.tags))
        if vm_tags != tags:
            new_tags_ids = [x.id for x in self.nb_tags]
            if not config.preserve_tags:
                vm.tags = new_tags_ids
            else:
                vm_tags_ids = [x.id for x in vm.tags]
                vm.tags = sorted(set(new_tags_ids + vm_tags_ids))
            updated += 1

        if vm.platform != self.device_platform:
            vm.platform = self.device_platform
            updated += 1

        if updated:
            vm.save()

    def print_debug(self):
        self.network = VirtualNetwork(server=self)
        print("Cluster:", self.get_netbox_cluster(config.virtual.cluster_name))
        print("Platform:", self.device_platform)
        print("VM:", self.get_netbox_vm())
        print("vCPU:", self.get_vcpus())
        print("Memory:", f"{self.get_memory()} MB")
        print(
            "NIC:",
        )
        pprint(self.network.get_network_cards())
        pass

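The tag update above compares the VM's current tag names against the configured ones and either replaces them or, when `preserve_tags` is set, merges the two ID sets. A minimal sketch of that merge decision with plain lists of IDs (the `merge_tag_ids` helper and its sample values are illustrative only):

```python
def merge_tag_ids(current_ids, new_ids, preserve_tags: bool):
    """Return the tag IDs to push to NetBox for this VM."""
    if not preserve_tags:
        # Replace: the agent's configured tags win.
        return sorted(set(new_ids))
    # Preserve: keep whatever is already on the VM and add the new ones.
    return sorted(set(new_ids) | set(current_ids))


print(merge_tag_ids([1, 7], [7, 9], preserve_tags=False))  # [7, 9]
print(merge_tag_ids([1, 7], [7, 9], preserve_tags=True))   # [1, 7, 9]
```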
46 nix/netifaces2.nix Normal file

@@ -0,0 +1,46 @@
{
  lib,
  buildPythonPackage,
  fetchFromGitHub,
  cargo,
  rustPlatform,
  rustc,
  typing-extensions,
}:

buildPythonPackage rec {
  pname = "netifaces-2";
  version = "0.0.22";
  pyproject = true;

  src = fetchFromGitHub {
    owner = "SamuelYvon";
    repo = "netifaces-2";
    rev = "V${version}";
    hash = "sha256-XO3HWq8FOVzvpbK8mIBOup6hFMnhDpqOK/5bPziPZQ8=";
  };

  cargoDeps = rustPlatform.fetchCargoTarball {
    inherit src;
    name = "${pname}-${version}";
    hash = "sha256-uoUa6DSBuIV3RrE7svT1TVLxPHdx8BFu/C6mbpRmor0=";
  };

  build-system = [
    cargo
    rustPlatform.cargoSetupHook
    rustPlatform.maturinBuildHook
    rustc
  ];

  dependencies = [ typing-extensions ];

  pythonImportsCheck = [ "netifaces" ];

  meta = {
    description = "Netifaces reborn";
    homepage = "https://github.com/SamuelYvon/netifaces-2.git";
    license = lib.licenses.mit;
    maintainers = with lib.maintainers; [ ];
  };
}
80 npins/default.nix Normal file

@@ -0,0 +1,80 @@
# Generated by npins. Do not modify; will be overwritten regularly
let
  data = builtins.fromJSON (builtins.readFile ./sources.json);
  version = data.version;

  mkSource =
    spec:
    assert spec ? type;
    let
      path =
        if spec.type == "Git" then
          mkGitSource spec
        else if spec.type == "GitRelease" then
          mkGitSource spec
        else if spec.type == "PyPi" then
          mkPyPiSource spec
        else if spec.type == "Channel" then
          mkChannelSource spec
        else
          builtins.throw "Unknown source type ${spec.type}";
    in
    spec // { outPath = path; };

  mkGitSource =
    {
      repository,
      revision,
      url ? null,
      hash,
      branch ? null,
      ...
    }:
    assert repository ? type;
    # At the moment, either it is a plain git repository (which has an url), or it is a GitHub/GitLab repository
    # In the latter case, there we will always be an url to the tarball
    if url != null then
      (builtins.fetchTarball {
        inherit url;
        sha256 = hash; # FIXME: check nix version & use SRI hashes
      })
    else
      assert repository.type == "Git";
      let
        urlToName =
          url: rev:
          let
            matched = builtins.match "^.*/([^/]*)(\\.git)?$" repository.url;

            short = builtins.substring 0 7 rev;

            appendShort = if (builtins.match "[a-f0-9]*" rev) != null then "-${short}" else "";
          in
          "${if matched == null then "source" else builtins.head matched}${appendShort}";
        name = urlToName repository.url revision;
      in
      builtins.fetchGit {
        url = repository.url;
        rev = revision;
        inherit name;
        # hash = hash;
      };

  mkPyPiSource =
    { url, hash, ... }:
    builtins.fetchurl {
      inherit url;
      sha256 = hash;
    };

  mkChannelSource =
    { url, hash, ... }:
    builtins.fetchTarball {
      inherit url;
      sha256 = hash;
    };
in
if version == 3 then
  builtins.mapAttrs (_: mkSource) data.pins
else
  throw "Unsupported format version ${toString version} in sources.json. Try running `npins upgrade`"
23 npins/sources.json Normal file

@@ -0,0 +1,23 @@
{
  "pins": {
    "git-hooks": {
      "type": "Git",
      "repository": {
        "type": "GitHub",
        "owner": "cachix",
        "repo": "git-hooks.nix"
      },
      "branch": "master",
      "revision": "3c3e88f0f544d6bb54329832616af7eb971b6be6",
      "url": "https://github.com/cachix/git-hooks.nix/archive/3c3e88f0f544d6bb54329832616af7eb971b6be6.tar.gz",
      "hash": "04pwjz423iq2nkazkys905gvsm5j39722ngavrnx42b8msr5k555"
    },
    "nixpkgs": {
      "type": "Channel",
      "name": "nixpkgs-unstable",
      "url": "https://releases.nixos.org/nixpkgs/nixpkgs-24.11pre694416.ccc0c2126893/nixexprs.tar.xz",
      "hash": "0cn1z4wzps8nfqxzr6l5mbn81adcqy2cy2ic70z13fhzicmxfsbx"
    }
  },
  "version": 3
}
2 pyproject.toml Normal file

@@ -0,0 +1,2 @@
[tool.isort]
profile = "black"

@@ -1,5 +1,8 @@
pynetbox==5.0.5
netaddr==0.8.0
netifaces==0.10.9
pyyaml==5.3.1
jsonargparse==2.32.2
pynetbox==7.3.4
netaddr==1.3.0
netifaces2==0.0.22
pyyaml==6.0.1
jsonargparse==4.32.0
python-slugify==8.0.4
packaging==23.2
distro==1.9.0

27 rpmenv.json Normal file

@@ -0,0 +1,27 @@
{
  "extensions": {
    "enabled": ["python_venv", "blocks"]
  },
  "core": {
    "group": "Application/System",
    "license": "Apache2",
    "name": "netbox-agent",
    "summary": "NetBox agent for server",
    "url": "https://github.com/Solvik/netbox-agent",
    "version": "0.7.0",
    "requires": ["lshw"]
  },
  "python_venv": {
    "python": "python3.6",
    "requirements": ["requirements.txt"],
    "name": "netbox-agent",
    "path": "/opt/"
  },
  "blocks": {
    "post": ["ln -sf /opt/netbox-agent/bin/netbox_agent /usr/bin/netbox_agent"],
    "desc": [
      "This project aims to create hardware automatically into Netbox based on standard tools (dmidecode, lldpd, parsing /sys/, etc).",
      "The goal is to generate an existing infrastructure on Netbox and have the ability to update it regularly by executing the agent."
    ]
  }
}
50 setup.py

@@ -1,34 +1,38 @@
import os

from setuptools import find_packages, setup


def get_requirements():
    reqs_path = os.path.join(os.path.dirname(__file__), "requirements.txt")
    with open(reqs_path, "r") as f:
        reqs = [r.strip() for r in f if r.strip()]
    return reqs


setup(
    name='netbox_agent',
    version='0.6.2',
    description='NetBox agent for server',
    long_description=open('README.md', encoding="utf-8").read(),
    long_description_content_type='text/markdown',
    url='https://github.com/solvik/netbox_agent',
    author='Solvik Blum',
    author_email='solvik@solvik.fr',
    license='Apache2',
    name="netbox_agent",
    version="0.7.1",
    description="NetBox agent for server",
    long_description=open("README.md", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    url="https://github.com/solvik/netbox_agent",
    author="Solvik Blum",
    author_email="solvik@solvik.fr",
    license="Apache2",
    include_package_data=True,
    packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    use_scm_version=True,
    install_requires=[
        'pynetbox==5.0.5',
        'netaddr==0.8.0',
        'netifaces==0.10.9',
        'pyyaml==5.3.1',
        'jsonargparse==2.32.2',
    ],
    install_requires=get_requirements(),
    zip_safe=False,
    keywords=['netbox'],
    keywords=["netbox"],
    classifiers=[
        'Intended Audience :: Developers',
        'Development Status :: 5 - Production/Stable',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        "Intended Audience :: Developers",
        "Development Status :: 5 - Production/Stable",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
    ],
    entry_points={
        'console_scripts': ['netbox_agent=netbox_agent.cli:main'],
    }
        "console_scripts": ["netbox_agent=netbox_agent.cli:main"],
    },
)

1 shell.nix Normal file

@@ -0,0 +1 @@
(import ./. { }).devShell

@@ -14,14 +14,15 @@ def get_fixture_paths(path):
    return fixture_paths


def parametrize_with_fixtures(path, base_path='tests/fixtures',
                              argname='fixture', only_filenames=None):
def parametrize_with_fixtures(
    path, base_path="tests/fixtures", argname="fixture", only_filenames=None
):
    path = os.path.join(base_path, path)
    fixture_paths = get_fixture_paths(path)
    argvalues = []
    for path in fixture_paths:
        with open(path, 'r') as f:
            content = ''.join(f.readlines())
        with open(path, "r") as f:
            content = "".join(f.readlines())
        filename = os.path.basename(path)
        if only_filenames and filename not in only_filenames:
            continue

@@ -30,4 +31,5 @@ def parametrize_with_fixtures(path, base_path='tests/fixtures',

    def _decorator(test_function):
        return pytest.mark.parametrize(argname, argvalues)(test_function)

    return _decorator

1798 tests/fixtures/dmidecode/HP_ProLiant_BL460c_Gen10_Graphics_Exp vendored Normal file

File diff suppressed because it is too large.

@@ -3,18 +3,22 @@ from tests.conftest import parametrize_with_fixtures


@parametrize_with_fixtures(
    'lldp/', only_filenames=[
        'dedibox1.txt',
    ])
    "lldp/",
    only_filenames=[
        "dedibox1.txt",
    ],
)
def test_lldp_parse_with_port_desc(fixture):
    lldp = LLDP(fixture)
    assert lldp.get_switch_port('enp1s0f0') == 'RJ-9'
    assert lldp.get_switch_port("enp1s0f0") == "RJ-9"


@parametrize_with_fixtures(
    'lldp/', only_filenames=[
        'qfx.txt',
    ])
    "lldp/",
    only_filenames=[
        "qfx.txt",
    ],
)
def test_lldp_parse_without_ifname(fixture):
    lldp = LLDP(fixture)
    assert lldp.get_switch_port('eth0') == 'xe-0/0/1'
    assert lldp.get_switch_port("eth0") == "xe-0/0/1"

@@ -6,7 +6,7 @@ from netbox_agent.vendors.supermicro import SupermicroHost
from tests.conftest import parametrize_with_fixtures


@parametrize_with_fixtures('dmidecode/')
@parametrize_with_fixtures("dmidecode/")
def test_init(fixture):
    dmi = parse(fixture)
    server = ServerBase(dmi)

@@ -14,78 +14,78 @@ def test_init(fixture):


@parametrize_with_fixtures(
    'dmidecode/', only_filenames=[
        'HP_SL4540_Gen8',
        'HP_BL460c_Gen9',
        'HP_DL380p_Gen8',
        'HP_SL4540_Gen8'
    ])
    "dmidecode/",
    only_filenames=[
        "HP_SL4540_Gen8",
        "HP_BL460c_Gen9",
        "HP_DL380p_Gen8",
        "HP_SL4540_Gen8",
        "HP_ProLiant_BL460c_Gen10_Graphics_Exp",
    ],
)
def test_hp_service_tag(fixture):
    dmi = parse(fixture)
    server = HPHost(dmi)
    assert server.get_service_tag() == '4242'
    assert server.get_service_tag() == "4242"


@parametrize_with_fixtures(
    'dmidecode/', only_filenames=[
        'HP_ProLiant_m710x'
    ])
@parametrize_with_fixtures("dmidecode/", only_filenames=["HP_ProLiant_m710x"])
def test_moonshot_blade(fixture):
    dmi = parse(fixture)
    server = HPHost(dmi)
    assert server.get_service_tag() == 'CN66480BLA'
    assert server.get_chassis_service_tag() == 'CZ3702MD5K'
    assert server.get_service_tag() == "CN66480BLA"
    assert server.get_chassis_service_tag() == "CZ3702MD5K"
    assert server.is_blade() is True
    assert server.own_expansion_slot() is False


@parametrize_with_fixtures(
    'dmidecode/', only_filenames=[
        'SYS-5039MS-H12TRF-OS012.txt'
    ])
@parametrize_with_fixtures("dmidecode/", only_filenames=["SYS-5039MS-H12TRF-OS012.txt"])
def test_supermicro_blade(fixture):
    dmi = parse(fixture)
    server = SupermicroHost(dmi)
    assert server.get_service_tag() == 'E235735X6B01665'
    assert server.get_chassis_service_tag() == 'C9390AF40A20098'
    assert server.get_service_tag() == "E235735X6B01665"
    assert server.get_chassis_service_tag() == "C9390AF40A20098"
    assert server.get_chassis() == "SYS-5039MS-H12TRF-OS012"
    assert server.is_blade() is True


@parametrize_with_fixtures(
    'dmidecode/', only_filenames=[
        'SM_SYS-6018R'
    ])
@parametrize_with_fixtures("dmidecode/", only_filenames=["SM_SYS-6018R"])
def test_supermicro_pizza(fixture):
    dmi = parse(fixture)
    server = SupermicroHost(dmi)
    assert server.get_service_tag() == 'A177950X7709591'
    assert server.get_service_tag() == "A177950X7709591"
    assert server.get_chassis() == "SYS-6018R-TDTPR"
    assert server.is_blade() is False


@parametrize_with_fixtures(
    'dmidecode/', only_filenames=[
        'QCT_X10E-9N'
    ])
@parametrize_with_fixtures("dmidecode/", only_filenames=["QCT_X10E-9N"])
def test_qct_x10(fixture):
    dmi = parse(fixture)
    server = QCTHost(dmi)
    assert server.get_service_tag() == 'QTFCQ57140285'
    assert server.get_service_tag() == "QTFCQ57140285"


@parametrize_with_fixtures(
    'dmidecode/', only_filenames=[
        'unknown.txt'
    ])
@parametrize_with_fixtures("dmidecode/", only_filenames=["unknown.txt"])
def test_generic_host_service_tag(fixture):
    dmi = parse(fixture)
    server = ServerBase(dmi)
    assert server.get_service_tag() == '42'
    assert server.get_service_tag() == "42"


@parametrize_with_fixtures(
    'dmidecode/', only_filenames=[
        'unknown.txt'
    ])
@parametrize_with_fixtures("dmidecode/", only_filenames=["unknown.txt"])
def test_generic_host_product_name(fixture):
    dmi = parse(fixture)
    server = ServerBase(dmi)
    assert server.get_product_name() == 'SR'
    assert server.get_product_name() == "SR"


@parametrize_with_fixtures(
    "dmidecode/", only_filenames=["HP_ProLiant_BL460c_Gen10_Graphics_Exp"]
)
def test_hp_blade_with_gpu_expansion(fixture):
    dmi = parse(fixture)
    server = HPHost(dmi)
    assert server.get_service_tag() == "4242"
    assert server.get_chassis_service_tag() == "4343"
    assert server.is_blade() is True
    assert server.own_expansion_slot() is True
    assert server.get_expansion_service_tag() == "4242 expansion"
