Merge pull request #177 from Solvik/176-netbox-2.9

support netbox >=2.9

commit a7c6ae68e2
21 changed files with 795 additions and 410 deletions

MANIFEST.in (new file, 1 addition)
@@ -0,0 +1 @@
include requirements.txt
README.md (42 changes)

@@ -92,10 +92,12 @@ netbox:
token: supersecrettoken
# uncomment to disable ssl verification
# ssl_verify: false
# uncomment to use the system's CA certificates
# ssl_ca_certs_file: /etc/ssl/certs/ca-certificates.crt

# Network configuration
network:
# Regex to ignore interfaces
ignore_interfaces: "(dummy.*|docker.*)"
# Regex to ignore IP addresses
ignore_ips: (127\.0\.0\..*)

@@ -111,7 +113,7 @@ network:
# blade_role: "Blade"
# server_role: "Server"
# tags: server, blade, ,just a comma,delimited,list
#
# custom_fields: field1=value1,field2=value2#
#
# Can use this to set the tenant
#

@@ -119,7 +121,7 @@ network:
# driver: "file:/tmp/tenant"
# regex: "(.*)"

## Enable virtual machine support
# virtual:
# # not mandatory, can be guessed
# enabled: True

@@ -145,7 +147,7 @@ rack_location:
# driver: "file:/tmp/datacenter"
# regex: "(.*)"

# Enable local inventory reporting
inventory: true
```
@@ -160,6 +162,36 @@ The `get_blade_slot` method returns the name of the `Device Bay`.

Certain vendors don't report the blade slot in `dmidecode`, so we can use the `slot_location` regex feature of the configuration file.
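A `slot_location` entry follows the same `driver`/`regex` pattern used by the other location settings (see `datacenter_location` further down): a command produces some text and a regular expression extracts the slot from it. The sketch below only illustrates that mechanism in Python; the command and the regex are hypothetical and depend on the vendor tooling available on the host.

```
import re
import subprocess

# Hypothetical driver command and regex, for illustration only.
output = subprocess.getoutput('ipmitool fru print 0')
match = re.search(r'Slot (?P<slot>\d+)', output)
if match:
    print('Blade slot: {}'.format(match.group('slot')))
```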
Some blade servers can be equipped with additional hardware using expansion blades, next to the processing blade, such as a GPU expansion or a drive bay expansion. By default, the hardware from the expansion is associated with the blade server itself, but it's possible to register the expansion as its own device using the `--expansion-as-device` command line parameter, or by setting `expansion_as_device` to `true` in the configuration file.

## Drives attributes processing

It is possible to process drives extended attributes such as the drive's physical or logical identifier, logical drive RAID type, size, consistency and so on.

Those attributes are set as `custom_fields` in Netbox and need to be registered properly before they can be specified during the inventory phase.

As the custom fields have to be created before the disks extended attributes can be registered, this feature is only activated using the `--process-virtual-drives` command line parameter, or by setting `process_virtual_drives` to `true` in the configuration file.

The custom fields to create as `DCIM > inventory item` `Text` are described below.
```
NAME            LABEL                      DESCRIPTION
mount_point     Mount point                Device mount point(s)
pd_identifier   Physical disk identifier   Physical disk identifier in the RAID controller
vd_array        Virtual drive array        Virtual drive array the disk is member of
vd_consistency  Virtual drive consistency  Virtual disk array consistency
vd_device       Virtual drive device       Virtual drive system device
vd_raid_type    Virtual drive RAID         Virtual drive array RAID type
vd_size         Virtual drive size         Virtual drive array size
```
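On NetBox releases that expose custom fields through the REST API (2.10 and later), these fields can be registered with a short pynetbox script such as the sketch below; on NetBox 2.9 they have to be created through the admin interface instead. The endpoint and payload shown here are assumptions and may need adjusting to the exact NetBox version in use.

```
import pynetbox

nb = pynetbox.api('https://netbox.example.com', token='supersecrettoken')

# name -> (label, description), matching the table above
fields = {
    'mount_point': ('Mount point', 'Device mount point(s)'),
    'pd_identifier': ('Physical disk identifier', 'Physical disk identifier in the RAID controller'),
    'vd_array': ('Virtual drive array', 'Virtual drive array the disk is member of'),
    'vd_consistency': ('Virtual drive consistency', 'Virtual disk array consistency'),
    'vd_device': ('Virtual drive device', 'Virtual drive system device'),
    'vd_raid_type': ('Virtual drive RAID', 'Virtual drive array RAID type'),
    'vd_size': ('Virtual drive size', 'Virtual drive array size'),
}

for name, (label, description) in fields.items():
    # Assumes the /api/extras/custom-fields/ endpoint (NetBox >= 2.10).
    nb.extras.custom_fields.create(
        name=name,
        label=label,
        description=description,
        type='text',
        content_types=['dcim.inventoryitem'],
    )
```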
In the current implementation, the disks attributes are not updated: if a disk with the correct serial number is found, it is considered up to date.

To force the reprocessing of the disks extended attributes, the `--force-disk-refresh` command line option can be used: it removes all existing disks before populating them again with the correct parsing. Unless this option is specified, the extended attributes won't be modified unless a disk is replaced.

It is possible to dump the physical/virtual disks map to the filesystem in JSON format to ease or automate disks management. The file path has to be provided using the `--dump-disks-map` command line parameter.
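The dump is a JSON list holding the custom fields of each mapped disk, as described in the table above. A minimal sketch of consuming it, assuming the map was written to the hypothetical path `/tmp/disks-map.json`:

```
import json

# Path previously passed to --dump-disks-map (example path).
with open('/tmp/disks-map.json') as f:
    disks_map = json.load(f)

for disk in disks_map:
    print('{} -> {} ({})'.format(
        disk.get('pd_identifier'),
        disk.get('vd_device'),
        disk.get('mount_point'),
    ))
```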
## Anycast IP

The default behavior of the agent is to assign an interface to an IP.

@@ -256,5 +288,5 @@ On a personal note, I use the docker image from [netbox-community/netbox-docker]
# git clone https://github.com/netbox-community/netbox-docker
# cd netbox-docker
# docker-compose pull
# docker-compose up
```
@@ -18,14 +18,15 @@ network:
# blade_role: "Blade"
# server_role: "Server"
# tags: server, blade, ,just a comma,delimited,list

# custom_fields: field1=value1,field2=value2
#
#
# Use this to set the tenant
#
#tenant:
# driver: "file:/tmp/tenant"
# regex: "(.*)"

datacenter_location:
driver: "cmd:cat /etc/qualification | tr [A-Z] [a-z]"
regex: "datacenter: (?P<datacenter>[A-Za-z0-9]+)"

@ -1,5 +1,7 @@
|
|||
from packaging import version
|
||||
import netbox_agent.dmidecode as dmidecode
|
||||
from netbox_agent.config import config
|
||||
from netbox_agent.config import netbox_instance as nb
|
||||
from netbox_agent.logging import logging # NOQA
|
||||
from netbox_agent.vendors.dell import DellHost
|
||||
from netbox_agent.vendors.generic import GenericHost
|
||||
|
@ -32,11 +34,15 @@ def run(config):
|
|||
except KeyError:
|
||||
server = GenericHost(dmi=dmi)
|
||||
|
||||
if version.parse(nb.version) < version.parse('2.9'):
|
||||
print('netbox-agent is not compatible with Netbox prior to version 2.9')
|
||||
return False
|
||||
|
||||
if config.register or config.update_all or config.update_network or \
|
||||
config.update_location or config.update_inventory or config.update_psu:
|
||||
server.netbox_create_or_update(config)
|
||||
if config.debug:
|
||||
server.print_debug()
|
||||
if config.register or config.update_all or config.update_network or config.update_location or \
|
||||
config.update_inventory or config.update_psu:
|
||||
server.netbox_create_or_update(config)
|
||||
return True
|
||||
|
||||
|
||||
|
|
|
@ -34,6 +34,7 @@ def get_config():
|
|||
help='Manage blade expansions as external devices')
|
||||
|
||||
p.add_argument('--log_level', default='debug')
|
||||
p.add_argument('--netbox.ssl_ca_certs_file', help='SSL CA certificates file')
|
||||
p.add_argument('--netbox.url', help='Netbox URL')
|
||||
p.add_argument('--netbox.token', help='Netbox API Token')
|
||||
p.add_argument('--netbox.ssl_verify', default=True, action='store_true',
|
||||
|
@ -44,6 +45,8 @@ def get_config():
|
|||
help="Command to output hostname, used as Device's name in netbox")
|
||||
p.add_argument('--device.tags', default=r'',
|
||||
help='tags to use for a host')
|
||||
p.add_argument('--device.custom_fields', default=r'',
|
||||
help='custom_fields to use for a host, eg: field1=v1,field2=v2')
|
||||
p.add_argument('--device.blade_role', default=r'Blade',
|
||||
help='role to use for a blade server')
|
||||
p.add_argument('--device.chassis_role', default=r'Server Chassis',
|
||||
|
@ -75,13 +78,22 @@ def get_config():
|
|||
p.add_argument('--network.lldp', help='Enable auto-cabling feature through LLDP infos')
|
||||
p.add_argument('--inventory', action='store_true',
|
||||
help='Enable HW inventory (CPU, Memory, RAID Cards, Disks) feature')
|
||||
p.add_argument('--process-virtual-drives', action='store_true',
|
||||
help='Process virtual drives information from RAID '
|
||||
'controllers to fill disk custom_fields')
|
||||
p.add_argument('--force-disk-refresh', action='store_true',
|
||||
help='Forces disks detection reprocessing')
|
||||
p.add_argument('--dump-disks-map',
|
||||
help='File path to dump physical/virtual disks map')
|
||||
|
||||
options = p.parse_args()
|
||||
return options
|
||||
|
||||
|
||||
config = get_config()
|
||||
|
||||
|
||||
def get_netbox_instance():
|
||||
config = get_config()
|
||||
if config.netbox.url is None or config.netbox.token is None:
|
||||
logging.error('Netbox URL and token are mandatory')
|
||||
sys.exit(1)
|
||||
|
@ -90,7 +102,12 @@ def get_netbox_instance():
|
|||
url=get_config().netbox.url,
|
||||
token=get_config().netbox.token,
|
||||
)
|
||||
if get_config().netbox.ssl_verify is False:
|
||||
ca_certs_file = config.netbox.ssl_ca_certs_file
|
||||
if ca_certs_file is not None:
|
||||
session = requests.Session()
|
||||
session.verify = ca_certs_file
|
||||
nb.http_session = session
|
||||
elif config.netbox.ssl_verify is False:
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
session = requests.Session()
|
||||
session.verify = False
|
||||
|
@ -99,5 +116,4 @@ def get_netbox_instance():
|
|||
return nb
|
||||
|
||||
|
||||
config = get_config()
|
||||
netbox_instance = get_netbox_instance()
|
||||
|
|
|
@ -1,8 +1,3 @@
|
|||
import logging
|
||||
import re
|
||||
|
||||
import pynetbox
|
||||
|
||||
from netbox_agent.config import config
|
||||
from netbox_agent.config import netbox_instance as nb
|
||||
from netbox_agent.lshw import LSHW
|
||||
|
@ -10,6 +5,12 @@ from netbox_agent.misc import get_vendor, is_tool
|
|||
from netbox_agent.raid.hp import HPRaid
|
||||
from netbox_agent.raid.omreport import OmreportRaid
|
||||
from netbox_agent.raid.storcli import StorcliRaid
|
||||
import traceback
|
||||
import pynetbox
|
||||
import logging
|
||||
import json
|
||||
import re
|
||||
|
||||
|
||||
INVENTORY_TAG = {
|
||||
'cpu': {'name': 'hw:cpu', 'slug': 'hw-cpu'},
|
||||
|
@ -59,6 +60,7 @@ class Inventory():
|
|||
self.lshw = LSHW()
|
||||
|
||||
def create_netbox_tags(self):
|
||||
ret = []
|
||||
for key, tag in INVENTORY_TAG.items():
|
||||
nb_tag = nb.extras.tags.get(
|
||||
name=tag['name']
|
||||
|
@ -69,6 +71,8 @@ class Inventory():
|
|||
slug=tag['slug'],
|
||||
comments=tag['name'],
|
||||
)
|
||||
ret.append(nb_tag)
|
||||
return ret
|
||||
|
||||
def find_or_create_manufacturer(self, name):
|
||||
if name is None:
|
||||
|
@ -98,7 +102,7 @@ class Inventory():
|
|||
logging.info('Tag {tag} is missing, returning empty array.'.format(tag=tag))
|
||||
items = []
|
||||
|
||||
return items
|
||||
return list(items)
|
||||
|
||||
def create_netbox_inventory_item(self, device_id, tags, vendor, name, serial, description):
|
||||
manufacturer = self.find_or_create_manufacturer(vendor)
|
||||
|
@ -153,7 +157,7 @@ class Inventory():
|
|||
if motherboard.get('serial') not in [x.serial for x in nb_motherboards]:
|
||||
self.create_netbox_inventory_item(
|
||||
device_id=self.device_id,
|
||||
tags=[INVENTORY_TAG['motherboard']['name']],
|
||||
tags=[{'name': INVENTORY_TAG['motherboard']['name']}],
|
||||
vendor='{}'.format(motherboard.get('vendor', 'N/A')),
|
||||
serial='{}'.format(motherboard.get('serial', 'No SN')),
|
||||
name='{}'.format(motherboard.get('name')),
|
||||
|
@ -166,7 +170,7 @@ class Inventory():
|
|||
device=self.device_id,
|
||||
manufacturer=manufacturer.id,
|
||||
discovered=True,
|
||||
tags=[INVENTORY_TAG['interface']['name']],
|
||||
tags=[{'name': INVENTORY_TAG['interface']['name']}],
|
||||
name="{}".format(iface['product']),
|
||||
serial='{}'.format(iface['serial']),
|
||||
description='{} {}'.format(iface['description'], iface['name'])
|
||||
|
@ -199,7 +203,7 @@ class Inventory():
|
|||
device=self.device_id,
|
||||
manufacturer=manufacturer.id,
|
||||
discovered=True,
|
||||
tags=[INVENTORY_TAG['cpu']['name']],
|
||||
tags=[{'name': INVENTORY_TAG['cpu']['name']}],
|
||||
name=cpu['product'],
|
||||
description='CPU {}'.format(cpu['location']),
|
||||
# asset_tag=cpu['location']
|
||||
|
@ -223,7 +227,7 @@ class Inventory():
|
|||
|
||||
def get_raid_cards(self, filter_cards=False):
|
||||
raid_class = None
|
||||
if self.server.manufacturer == 'Dell':
|
||||
if self.server.manufacturer in ('Dell', 'Huawei'):
|
||||
if is_tool('omreport'):
|
||||
raid_class = OmreportRaid
|
||||
if is_tool('storcli'):
|
||||
|
@ -257,7 +261,7 @@ class Inventory():
|
|||
device=self.device_id,
|
||||
discovered=True,
|
||||
manufacturer=manufacturer.id if manufacturer else None,
|
||||
tags=[INVENTORY_TAG['raid_card']['name']],
|
||||
tags=[{'name': INVENTORY_TAG['raid_card']['name']}],
|
||||
name='{}'.format(name),
|
||||
serial='{}'.format(serial),
|
||||
description='RAID Card',
|
||||
|
@ -299,53 +303,60 @@ class Inventory():
|
|||
if raid_card.get_serial_number() not in [x.serial for x in nb_raid_cards]:
|
||||
self.create_netbox_raid_card(raid_card)
|
||||
|
||||
def is_virtual_disk(self, disk):
|
||||
def is_virtual_disk(self, disk, raid_devices):
|
||||
disk_type = disk.get('type')
|
||||
logicalname = disk.get('logicalname')
|
||||
description = disk.get('description')
|
||||
size = disk.get('size')
|
||||
product = disk.get('product')
|
||||
|
||||
if logicalname in raid_devices or disk_type is None:
|
||||
return True
|
||||
non_raid_disks = [
|
||||
'MR9361-8i',
|
||||
]
|
||||
|
||||
if size is None and logicalname is None or \
|
||||
'virtual' in product.lower() or 'logical' in product.lower() or \
|
||||
if logicalname in raid_devices or \
|
||||
disk_type is None or \
|
||||
product in non_raid_disks or \
|
||||
'virtual' in product.lower() or \
|
||||
'logical' in product.lower() or \
|
||||
'volume' in description.lower() or \
|
||||
description == 'SCSI Enclosure' or \
|
||||
'volume' in description.lower():
|
||||
(size is None and logicalname is None):
|
||||
return True
|
||||
return False
|
||||
|
||||
def get_hw_disks(self):
|
||||
disks = []
|
||||
|
||||
for raid_card in self.get_raid_cards(filter_cards=True):
|
||||
disks.extend(raid_card.get_physical_disks())
|
||||
|
||||
raid_devices = [
|
||||
d.get('custom_fields', {}).get('vd_device')
|
||||
for d in disks
|
||||
if d.get('custom_fields', {}).get('vd_device')
|
||||
]
|
||||
|
||||
for disk in self.lshw.get_hw_linux("storage"):
|
||||
if self.is_virtual_disk(disk):
|
||||
if self.is_virtual_disk(disk, raid_devices):
|
||||
continue
|
||||
|
||||
logicalname = disk.get('logicalname')
|
||||
description = disk.get('description')
|
||||
size = disk.get('size', 0)
|
||||
product = disk.get('product')
|
||||
serial = disk.get('serial')
|
||||
|
||||
d = {}
|
||||
d["name"] = ""
|
||||
d['Size'] = '{} GB'.format(int(size / 1024 / 1024 / 1024))
|
||||
d['logicalname'] = logicalname
|
||||
d['description'] = description
|
||||
d['SN'] = serial
|
||||
d['Model'] = product
|
||||
size = int(disk.get('size', 0)) / 1073741824
|
||||
d = {
|
||||
"name": "",
|
||||
'Size': '{} GB'.format(size),
|
||||
'logicalname': disk.get('logicalname'),
|
||||
'description': disk.get('description'),
|
||||
'SN': disk.get('serial'),
|
||||
'Model': disk.get('product'),
|
||||
'Type': disk.get('type'),
|
||||
}
|
||||
if disk.get('vendor'):
|
||||
d['Vendor'] = disk['vendor']
|
||||
else:
|
||||
d['Vendor'] = get_vendor(disk['product'])
|
||||
disks.append(d)
|
||||
|
||||
for raid_card in self.get_raid_cards(filter_cards=True):
|
||||
disks += raid_card.get_physical_disks()
|
||||
|
||||
# remove duplicate serials
|
||||
seen = set()
|
||||
uniq = [x for x in disks if x['SN'] not in seen and not seen.add(x['SN'])]
|
||||
|
@ -358,53 +369,79 @@ class Inventory():
|
|||
|
||||
logicalname = disk.get('logicalname')
|
||||
desc = disk.get('description')
|
||||
# nonraid disk
|
||||
if logicalname and desc:
|
||||
if type(logicalname) is list:
|
||||
logicalname = logicalname[0]
|
||||
name = '{} - {} ({})'.format(
|
||||
desc,
|
||||
logicalname,
|
||||
disk.get('Size', 0))
|
||||
description = 'Device {}'.format(disk.get('logicalname', 'Unknown'))
|
||||
else:
|
||||
name = '{} ({})'.format(disk['Model'], disk['Size'])
|
||||
description = '{}'.format(disk['Type'])
|
||||
name = '{} ({})'.format(disk['Model'], disk['Size'])
|
||||
description = disk['Type']
|
||||
|
||||
_ = nb.dcim.inventory_items.create(
|
||||
device=self.device_id,
|
||||
discovered=True,
|
||||
tags=[INVENTORY_TAG['disk']['name']],
|
||||
name=name,
|
||||
serial=disk['SN'],
|
||||
part_id=disk['Model'],
|
||||
description=description,
|
||||
manufacturer=manufacturer.id if manufacturer else None
|
||||
)
|
||||
parms = {
|
||||
'device': self.device_id,
|
||||
'discovered': True,
|
||||
'tags': [{'name': INVENTORY_TAG['disk']['name']}],
|
||||
'name': name,
|
||||
'serial': disk['SN'],
|
||||
'part_id': disk['Model'],
|
||||
'description': description,
|
||||
'manufacturer': getattr(manufacturer, "id", None),
|
||||
}
|
||||
if config.process_virtual_drives:
|
||||
parms['custom_fields'] = disk.get("custom_fields", {})
|
||||
|
||||
_ = nb.dcim.inventory_items.create(**parms)
|
||||
|
||||
logging.info('Creating Disk {model} {serial}'.format(
|
||||
model=disk['Model'],
|
||||
serial=disk['SN'],
|
||||
))
|
||||
|
||||
def dump_disks_map(self, disks):
|
||||
disk_map = [d['custom_fields'] for d in disks if 'custom_fields' in d]
|
||||
if config.dump_disks_map == "-":
|
||||
f = sys.stdout
|
||||
else:
|
||||
f = open(config.dump_disks_map, "w")
|
||||
f.write(
|
||||
json.dumps(
|
||||
disk_map,
|
||||
separators=(',', ':'),
|
||||
indent=4,
|
||||
sort_keys=True
|
||||
)
|
||||
)
|
||||
if config.dump_disks_map != "-":
|
||||
f.close()
|
||||
|
||||
def do_netbox_disks(self):
|
||||
nb_disks = self.get_netbox_inventory(
|
||||
device_id=self.device_id,
|
||||
tag=INVENTORY_TAG['disk']['slug'])
|
||||
tag=INVENTORY_TAG['disk']['slug']
|
||||
)
|
||||
disks = self.get_hw_disks()
|
||||
if config.dump_disks_map:
|
||||
try:
|
||||
self.dump_disks_map(disks)
|
||||
except Exception as e:
|
||||
logging.error("Failed to dump disks map: {}".format(e))
|
||||
logging.debug(traceback.format_exc())
|
||||
disk_serials = [d['SN'] for d in disks if 'SN' in d]
|
||||
|
||||
# delete disks that are in netbox but not locally
|
||||
# use the serial_number as the comparison element
|
||||
for nb_disk in nb_disks:
|
||||
if nb_disk.serial not in [x['SN'] for x in disks if x.get('SN')]:
|
||||
if nb_disk.serial not in disk_serials or \
|
||||
config.force_disk_refresh:
|
||||
logging.info('Deleting unknown locally Disk {serial}'.format(
|
||||
serial=nb_disk.serial,
|
||||
))
|
||||
nb_disk.delete()
|
||||
|
||||
if config.force_disk_refresh:
|
||||
nb_disks = self.get_netbox_inventory(
|
||||
device_id=self.device_id,
|
||||
tag=INVENTORY_TAG['disk']['slug']
|
||||
)
|
||||
|
||||
# create disks that are not in netbox
|
||||
for disk in disks:
|
||||
if disk.get('SN') not in [x.serial for x in nb_disks]:
|
||||
if disk.get('SN') not in [d.serial for d in nb_disks]:
|
||||
self.create_netbox_disk(disk)
|
||||
|
||||
def create_netbox_memory(self, memory):
|
||||
|
@ -414,7 +451,7 @@ class Inventory():
|
|||
device=self.device_id,
|
||||
discovered=True,
|
||||
manufacturer=manufacturer.id,
|
||||
tags=[INVENTORY_TAG['memory']['name']],
|
||||
tags=[{'name': INVENTORY_TAG['memory']['name']}],
|
||||
name=name,
|
||||
part_id=memory['product'],
|
||||
serial=memory['serial'],
|
||||
|
@ -447,37 +484,56 @@ class Inventory():
|
|||
if memory.get('serial') not in [x.serial for x in nb_memories]:
|
||||
self.create_netbox_memory(memory)
|
||||
|
||||
def create_netbox_gpus(self):
|
||||
for gpu in self.lshw.get_hw_linux('gpu'):
|
||||
def create_netbox_gpus(self, gpus):
|
||||
for gpu in gpus:
|
||||
if 'product' in gpu and len(gpu['product']) > 50:
|
||||
gpu['product'] = (gpu['product'][:48] + '..')
|
||||
|
||||
manufacturer = self.find_or_create_manufacturer(gpu["vendor"])
|
||||
_ = nb.dcim.inventory_items.create(
|
||||
device=self.device_id,
|
||||
manufacturer=manufacturer.id,
|
||||
discovered=True,
|
||||
tags=[INVENTORY_TAG['gpu']['name']],
|
||||
tags=[{'name': INVENTORY_TAG['gpu']['name']}],
|
||||
name=gpu['product'],
|
||||
description='GPU {}'.format(gpu['product']),
|
||||
description=gpu['description'],
|
||||
)
|
||||
|
||||
logging.info('Creating GPU model {}'.format(gpu['product']))
|
||||
|
||||
def is_external_gpu(self, gpu):
|
||||
is_3d_gpu = gpu['description'].startswith('3D')
|
||||
return self.server.is_blade() and \
|
||||
self.server.own_gpu_expansion_slot() and is_3d_gpu
|
||||
|
||||
def do_netbox_gpus(self):
|
||||
gpus = self.lshw.get_hw_linux('gpu')
|
||||
gpus = []
|
||||
gpu_models = {}
|
||||
for gpu in self.lshw.get_hw_linux('gpu'):
|
||||
# Filters GPU if an expansion bay is detected:
|
||||
# The internal (VGA) GPU only goes into the blade inventory,
|
||||
# the external (3D) GPU goes into the expansion blade.
|
||||
if config.expansion_as_device and \
|
||||
self.update_expansion ^ self.is_external_gpu(gpu):
|
||||
continue
|
||||
gpus.append(gpu)
|
||||
gpu_models.setdefault(gpu["product"], 0)
|
||||
gpu_models[gpu["product"]] += 1
|
||||
|
||||
nb_gpus = self.get_netbox_inventory(
|
||||
device_id=self.device_id,
|
||||
tag=INVENTORY_TAG['gpu']['slug'],
|
||||
)
|
||||
|
||||
if config.expansion_as_device and len(nb_gpus):
|
||||
nb_gpu_models = {}
|
||||
for gpu in nb_gpus:
|
||||
nb_gpu_models.setdefault(str(gpu), 0)
|
||||
nb_gpu_models[str(gpu)] += 1
|
||||
up_to_date = set(gpu_models) == set(nb_gpu_models)
|
||||
if not gpus or not up_to_date:
|
||||
for x in nb_gpus:
|
||||
x.delete()
|
||||
elif not len(nb_gpus) or \
|
||||
len(nb_gpus) and len(gpus) != len(nb_gpus):
|
||||
for x in nb_gpus:
|
||||
x.delete()
|
||||
self.create_netbox_gpus()
|
||||
if gpus and not up_to_date:
|
||||
self.create_netbox_gpus(gpus)
|
||||
|
||||
def create_or_update(self):
|
||||
if config.inventory is None or config.update_inventory is None:
|
||||
|
|
|
@ -1,9 +1,8 @@
|
|||
import json
|
||||
import logging
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from netbox_agent.misc import is_tool
|
||||
import subprocess
|
||||
import logging
|
||||
import json
|
||||
import sys
|
||||
|
||||
|
||||
class LSHW():
|
||||
|
@ -15,7 +14,13 @@ class LSHW():
|
|||
data = subprocess.getoutput(
|
||||
'lshw -quiet -json'
|
||||
)
|
||||
self.hw_info = json.loads(data)
|
||||
json_data = json.loads(data)
|
||||
# Starting from version 02.18, `lshw -json` wraps its result in a list
|
||||
# rather than returning a dictionary directly
|
||||
if isinstance(json_data, list):
|
||||
self.hw_info = json_data[0]
|
||||
else:
|
||||
self.hw_info = json_data
|
||||
self.info = {}
|
||||
self.memories = []
|
||||
self.interfaces = []
|
||||
|
@ -77,42 +82,41 @@ class LSHW():
|
|||
def find_storage(self, obj):
|
||||
if "children" in obj:
|
||||
for device in obj["children"]:
|
||||
d = {}
|
||||
d["logicalname"] = device.get("logicalname")
|
||||
d["product"] = device.get("product")
|
||||
d["serial"] = device.get("serial")
|
||||
d["version"] = device.get("version")
|
||||
d["size"] = device.get("size")
|
||||
d["description"] = device.get("description")
|
||||
|
||||
self.disks.append(d)
|
||||
|
||||
self.disks.append({
|
||||
"logicalname": device.get("logicalname"),
|
||||
"product": device.get("product"),
|
||||
"serial": device.get("serial"),
|
||||
"version": device.get("version"),
|
||||
"size": device.get("size"),
|
||||
"description": device.get("description"),
|
||||
"type": device.get("description"),
|
||||
})
|
||||
elif "nvme" in obj["configuration"]["driver"]:
|
||||
if not is_tool('nvme'):
|
||||
logging.error('nvme-cli >= 1.0 does not seem to be installed')
|
||||
else:
|
||||
try:
|
||||
nvme = json.loads(
|
||||
subprocess.check_output(
|
||||
["nvme", '-list', '-o', 'json'],
|
||||
encoding='utf8')
|
||||
)
|
||||
|
||||
for device in nvme["Devices"]:
|
||||
d = {}
|
||||
d['logicalname'] = device["DevicePath"]
|
||||
d['product'] = device["ModelNumber"]
|
||||
d['serial'] = device["SerialNumber"]
|
||||
d["version"] = device["Firmware"]
|
||||
if "UsedSize" in device:
|
||||
d['size'] = device["UsedSize"]
|
||||
if "UsedBytes" in device:
|
||||
d['size'] = device["UsedBytes"]
|
||||
d['description'] = "NVME Disk"
|
||||
|
||||
self.disks.append(d)
|
||||
except Exception:
|
||||
pass
|
||||
return
|
||||
try:
|
||||
nvme = json.loads(
|
||||
subprocess.check_output(
|
||||
["nvme", '-list', '-o', 'json'],
|
||||
encoding='utf8')
|
||||
)
|
||||
for device in nvme["Devices"]:
|
||||
d = {
|
||||
'logicalname': device["DevicePath"],
|
||||
'product': device["ModelNumber"],
|
||||
'serial': device["SerialNumber"],
|
||||
"version": device["Firmware"],
|
||||
'description': "NVME",
|
||||
'type': "NVME",
|
||||
}
|
||||
if "UsedSize" in device:
|
||||
d['size'] = device["UsedSize"]
|
||||
if "UsedBytes" in device:
|
||||
d['size'] = device["UsedBytes"]
|
||||
self.disks.append(d)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def find_cpus(self, obj):
|
||||
if "product" in obj:
|
||||
|
|
|
@ -1,8 +1,9 @@
|
|||
import socket
|
||||
import subprocess
|
||||
from shutil import which
|
||||
|
||||
from netbox_agent.config import netbox_instance as nb
|
||||
from slugify import slugify
|
||||
from shutil import which
|
||||
import subprocess
|
||||
import socket
|
||||
import re
|
||||
|
||||
|
||||
def is_tool(name):
|
||||
|
@ -60,6 +61,7 @@ def get_hostname(config):
|
|||
|
||||
|
||||
def create_netbox_tags(tags):
|
||||
ret = []
|
||||
for tag in tags:
|
||||
nb_tag = nb.extras.tags.get(
|
||||
name=tag
|
||||
|
@ -67,5 +69,23 @@ def create_netbox_tags(tags):
|
|||
if not nb_tag:
|
||||
nb_tag = nb.extras.tags.create(
|
||||
name=tag,
|
||||
slug=tag,
|
||||
slug=slugify(tag),
|
||||
)
|
||||
ret.append(nb_tag)
|
||||
return ret
|
||||
|
||||
|
||||
def get_mount_points():
|
||||
mount_points = {}
|
||||
output = subprocess.getoutput('mount')
|
||||
for r in output.split("\n"):
|
||||
if not r.startswith("/dev/"):
|
||||
continue
|
||||
mount_info = r.split()
|
||||
device = mount_info[0]
|
||||
device = re.sub(r'\d+$', '', device)
|
||||
mp = mount_info[2]
|
||||
mount_points.setdefault(device, []).append(mp)
|
||||
return mount_points
|
||||
|
||||
|
||||
|
|
|
@ -56,6 +56,13 @@ class Network(object):
|
|||
|
||||
ip_addr = netifaces.ifaddresses(interface).get(netifaces.AF_INET, [])
|
||||
ip6_addr = netifaces.ifaddresses(interface).get(netifaces.AF_INET6, [])
|
||||
if config.network.ignore_ips:
|
||||
for i, ip in enumerate(ip_addr):
|
||||
if re.match(config.network.ignore_ips, ip['addr']):
|
||||
ip_addr.pop(i)
|
||||
for i, ip in enumerate(ip6_addr):
|
||||
if re.match(config.network.ignore_ips, ip['addr']):
|
||||
ip6_addr.pop(i)
|
||||
|
||||
# netifaces returns a ipv6 netmask that netaddr does not understand.
|
||||
# this strips the netmask down to the correct format for netaddr,
|
||||
|
@ -77,15 +84,11 @@ class Network(object):
|
|||
addr["netmask"] = addr["netmask"].split('/')[0]
|
||||
ip_addr.append(addr)
|
||||
|
||||
if config.network.ignore_ips and ip_addr:
|
||||
for i, ip in enumerate(ip_addr):
|
||||
if re.match(config.network.ignore_ips, ip['addr']):
|
||||
ip_addr.pop(i)
|
||||
|
||||
mac = open('/sys/class/net/{}/address'.format(interface), 'r').read().strip()
|
||||
vlan = None
|
||||
if len(interface.split('.')) > 1:
|
||||
vlan = int(interface.split('.')[1])
|
||||
|
||||
bonding = False
|
||||
bonding_slaves = []
|
||||
if os.path.isdir('/sys/class/net/{}/bonding'.format(interface)):
|
||||
|
@ -145,19 +148,19 @@ class Network(object):
|
|||
if nic['mac'] is None:
|
||||
interface = self.nb_net.interfaces.get(
|
||||
name=nic['name'],
|
||||
**self.custom_arg_id,
|
||||
**self.custom_arg_id
|
||||
)
|
||||
else:
|
||||
interface = self.nb_net.interfaces.get(
|
||||
mac_address=nic['mac'],
|
||||
name=nic['name'],
|
||||
**self.custom_arg_id,
|
||||
**self.custom_arg_id
|
||||
)
|
||||
return interface
|
||||
|
||||
def get_netbox_network_cards(self):
|
||||
return self.nb_net.interfaces.filter(
|
||||
**self.custom_arg_id,
|
||||
**self.custom_arg_id
|
||||
)
|
||||
|
||||
def get_netbox_type_for_nic(self, nic):
|
||||
|
@ -209,8 +212,12 @@ class Network(object):
|
|||
update = False
|
||||
vlan_id = nic['vlan']
|
||||
lldp_vlan = self.lldp.get_switch_vlan(nic['name']) if config.network.lldp else None
|
||||
# For some strange reason, we need to get the object from scratch
|
||||
# The object returned by pynetbox's save isn't always working (since pynetbox 6)
|
||||
interface = nb.dcim.interfaces.get(id=interface.id)
|
||||
|
||||
# if local interface isn't a interface vlan or lldp doesn't report a vlan-id
|
||||
# Handle the case where the local interface isn't an interface vlan as reported by Netbox
|
||||
# and that LLDP doesn't report a vlan-id
|
||||
if vlan_id is None and lldp_vlan is None and \
|
||||
(interface.mode is not None or len(interface.tagged_vlans) > 0):
|
||||
logging.info('Interface {interface} is not tagged, resetting mode'.format(
|
||||
|
@ -219,13 +226,16 @@ class Network(object):
|
|||
interface.mode = None
|
||||
interface.tagged_vlans = []
|
||||
interface.untagged_vlan = None
|
||||
# if it's a vlan interface
|
||||
# if the local interface is configured with a vlan, it's supposed to be tagged
|
||||
# if mode is either not set or not correctly configured or vlan are not
|
||||
# correctly configured, we reset the vlan
|
||||
elif vlan_id and (
|
||||
interface.mode is None or
|
||||
type(interface.mode) is not int and (
|
||||
hasattr(interface.mode, 'value') and
|
||||
interface.mode.value == self.dcim_choices['interface:mode']['Access'] or
|
||||
len(interface.tagged_vlans) != 1 or
|
||||
interface.tagged_vlans[0].vid != vlan_id)):
|
||||
int(interface.tagged_vlans[0].vid) != int(vlan_id))):
|
||||
logging.info('Resetting tagged VLAN(s) on interface {interface}'.format(
|
||||
interface=interface))
|
||||
update = True
|
||||
|
@ -233,7 +243,7 @@ class Network(object):
|
|||
interface.mode = self.dcim_choices['interface:mode']['Tagged']
|
||||
interface.tagged_vlans = [nb_vlan] if nb_vlan else []
|
||||
interface.untagged_vlan = None
|
||||
# if lldp reports a vlan-id with pvid
|
||||
# Finally if LLDP reports a vlan-id with the pvid attribute
|
||||
elif lldp_vlan:
|
||||
pvid_vlan = [key for (key, value) in lldp_vlan.items() if value['pvid']]
|
||||
if len(pvid_vlan) > 0 and (
|
||||
|
@ -257,12 +267,12 @@ class Network(object):
|
|||
|
||||
nb_vlan = None
|
||||
|
||||
params = {
|
||||
params = dict(self.custom_arg)
|
||||
params.update({
|
||||
'name': nic['name'],
|
||||
'type': type,
|
||||
'mgmt_only': mgmt,
|
||||
**self.custom_arg,
|
||||
}
|
||||
})
|
||||
|
||||
if not nic.get('virtual', False):
|
||||
params['mac_address'] = nic['mac']
|
||||
|
@ -271,7 +281,7 @@ class Network(object):
|
|||
|
||||
if nic['vlan']:
|
||||
nb_vlan = self.get_or_create_vlan(nic['vlan'])
|
||||
interface.mode = 200
|
||||
interface.mode = self.dcim_choices['interface:mode']['Tagged']
|
||||
interface.tagged_vlans = [nb_vlan.id]
|
||||
interface.save()
|
||||
elif config.network.lldp and self.lldp.get_switch_vlan(nic['name']) is not None:
|
||||
|
@ -315,57 +325,71 @@ class Network(object):
|
|||
netbox_ips = nb.ipam.ip_addresses.filter(
|
||||
address=ip,
|
||||
)
|
||||
if not len(netbox_ips):
|
||||
if not netbox_ips:
|
||||
logging.info('Create new IP {ip} on {interface}'.format(
|
||||
ip=ip, interface=interface))
|
||||
query_params = {
|
||||
'address': ip,
|
||||
'status': "active",
|
||||
'assigned_object_type': self.assigned_object_type,
|
||||
'assigned_object_id': interface.id
|
||||
}
|
||||
|
||||
netbox_ip = nb.ipam.ip_addresses.create(
|
||||
address=ip,
|
||||
interface=interface.id,
|
||||
status=1,
|
||||
**query_params
|
||||
)
|
||||
else:
|
||||
netbox_ip = netbox_ips[0]
|
||||
# If IP exists in anycast
|
||||
if netbox_ip.role and netbox_ip.role.label == 'Anycast':
|
||||
logging.debug('IP {} is Anycast..'.format(ip))
|
||||
unassigned_anycast_ip = [x for x in netbox_ips if x.interface is None]
|
||||
assigned_anycast_ip = [x for x in netbox_ips if
|
||||
x.interface and x.interface.id == interface.id]
|
||||
# use the first available anycast ip
|
||||
if len(unassigned_anycast_ip):
|
||||
logging.info('Assigning existing Anycast IP {} to interface'.format(ip))
|
||||
netbox_ip = unassigned_anycast_ip[0]
|
||||
netbox_ip.interface = interface
|
||||
netbox_ip.save()
|
||||
# or if everything is assigned to other servers
|
||||
elif not len(assigned_anycast_ip):
|
||||
logging.info('Creating Anycast IP {} and assigning it to interface'.format(ip))
|
||||
netbox_ip = nb.ipam.ip_addresses.create(
|
||||
address=ip,
|
||||
interface=interface.id,
|
||||
status=1,
|
||||
role=self.ipam_choices['ip-address:role']['Anycast'],
|
||||
tenant=self.tenant.id if self.tenant else None,
|
||||
)
|
||||
return netbox_ip
|
||||
else:
|
||||
if netbox_ip.interface is None:
|
||||
logging.info('Assigning existing IP {ip} to {interface}'.format(
|
||||
ip=ip, interface=interface))
|
||||
elif netbox_ip.interface.id != interface.id:
|
||||
logging.info(
|
||||
'Detected interface change for ip {ip}: old interface is '
|
||||
'{old_interface} (id: {old_id}), new interface is {new_interface} '
|
||||
' (id: {new_id})'
|
||||
.format(
|
||||
old_interface=netbox_ip.interface, new_interface=interface,
|
||||
old_id=netbox_ip.id, new_id=interface.id, ip=netbox_ip.address
|
||||
))
|
||||
else:
|
||||
return netbox_ip
|
||||
return netbox_ip
|
||||
|
||||
netbox_ip = list(netbox_ips)[0]
|
||||
# If IP exists in anycast
|
||||
if netbox_ip.role and netbox_ip.role.label == 'Anycast':
|
||||
logging.debug('IP {} is Anycast..'.format(ip))
|
||||
unassigned_anycast_ip = [x for x in netbox_ips if x.interface is None]
|
||||
assigned_anycast_ip = [x for x in netbox_ips if
|
||||
x.interface and x.interface.id == interface.id]
|
||||
# use the first available anycast ip
|
||||
if len(unassigned_anycast_ip):
|
||||
logging.info('Assigning existing Anycast IP {} to interface'.format(ip))
|
||||
netbox_ip = unassigned_anycast_ip[0]
|
||||
netbox_ip.interface = interface
|
||||
netbox_ip.save()
|
||||
return netbox_ip
|
||||
# or if everything is assigned to other servers
|
||||
elif not len(assigned_anycast_ip):
|
||||
logging.info('Creating Anycast IP {} and assigning it to interface'.format(ip))
|
||||
query_params = {
|
||||
"address": ip,
|
||||
"status": "active",
|
||||
"role": self.ipam_choices['ip-address:role']['Anycast'],
|
||||
"tenant": self.tenant.id if self.tenant else None,
|
||||
"assigned_object_type": self.assigned_object_type,
|
||||
"assigned_object_id": interface.id
|
||||
}
|
||||
netbox_ip = nb.ipam.ip_addresses.create(**query_params)
|
||||
return netbox_ip
|
||||
else:
|
||||
ip_interface = getattr(netbox_ip, 'interface', None)
|
||||
assigned_object = getattr(netbox_ip, 'assigned_object', None)
|
||||
if not ip_interface or not assigned_object:
|
||||
logging.info('Assigning existing IP {ip} to {interface}'.format(
|
||||
ip=ip, interface=interface))
|
||||
elif (ip_interface and ip_interface.id != interface.id) or \
|
||||
(assigned_object and assigned_object.id != interface.id):
|
||||
|
||||
old_interface = getattr(netbox_ip, "assigned_object", "n/a")
|
||||
logging.info(
|
||||
'Detected interface change for ip {ip}: old interface is '
|
||||
'{old_interface} (id: {old_id}), new interface is {new_interface} '
|
||||
' (id: {new_id})'
|
||||
.format(
|
||||
old_interface=old_interface, new_interface=interface,
|
||||
old_id=netbox_ip.id, new_id=interface.id, ip=netbox_ip.address
|
||||
))
|
||||
else:
|
||||
return netbox_ip
|
||||
|
||||
netbox_ip.assigned_object_type = self.assigned_object_type
|
||||
netbox_ip.assigned_object_id = interface.id
|
||||
netbox_ip.save()
|
||||
|
||||
def create_or_update_netbox_network_cards(self):
|
||||
if config.update_all is None or config.update_network is None:
|
||||
|
@ -373,9 +397,9 @@ class Network(object):
|
|||
logging.debug('Creating/Updating NIC...')
|
||||
|
||||
# delete unknown interface
|
||||
nb_nics = self.get_netbox_network_cards()
|
||||
nb_nics = list(self.get_netbox_network_cards())
|
||||
local_nics = [x['name'] for x in self.nics]
|
||||
for nic in nb_nics[:]:
|
||||
for nic in nb_nics:
|
||||
if nic.name not in local_nics:
|
||||
logging.info('Deleting netbox interface {name} because not present locally'.format(
|
||||
name=nic.name
|
||||
|
@ -386,16 +410,19 @@ class Network(object):
|
|||
# delete IP on netbox that are not known on this server
|
||||
if len(nb_nics):
|
||||
netbox_ips = nb.ipam.ip_addresses.filter(
|
||||
interface_id=[x.id for x in nb_nics],
|
||||
**{self.intf_type: [x.id for x in nb_nics]}
|
||||
)
|
||||
|
||||
netbox_ips = list(netbox_ips)
|
||||
all_local_ips = list(chain.from_iterable([
|
||||
x['ip'] for x in self.nics if x['ip'] is not None
|
||||
]))
|
||||
for netbox_ip in netbox_ips:
|
||||
if netbox_ip.address not in all_local_ips:
|
||||
logging.info('Unassigning IP {ip} from {interface}'.format(
|
||||
ip=netbox_ip.address, interface=netbox_ip.interface))
|
||||
netbox_ip.interface = None
|
||||
ip=netbox_ip.address, interface=netbox_ip.assigned_object))
|
||||
netbox_ip.assigned_object_type = None
|
||||
netbox_ip.assigned_object_id = None
|
||||
netbox_ip.save()
|
||||
|
||||
# update each nic
|
||||
|
@ -417,12 +444,13 @@ class Network(object):
|
|||
ret, interface = self.reset_vlan_on_interface(nic, interface)
|
||||
nic_update += ret
|
||||
|
||||
_type = self.get_netbox_type_for_nic(nic)
|
||||
if not interface.type or \
|
||||
_type != interface.type.value:
|
||||
logging.info('Interface type is wrong, resetting')
|
||||
interface.type = _type
|
||||
nic_update += 1
|
||||
if hasattr(interface, 'type'):
|
||||
_type = self.get_netbox_type_for_nic(nic)
|
||||
if not interface.type or \
|
||||
_type != interface.type.value:
|
||||
logging.info('Interface type is wrong, resetting')
|
||||
interface.type = _type
|
||||
nic_update += 1
|
||||
|
||||
if hasattr(interface, 'lag') and interface.lag is not None:
|
||||
local_lag_int = next(
|
||||
|
@ -465,6 +493,8 @@ class ServerNetwork(Network):
|
|||
self.nb_net = nb.dcim
|
||||
self.custom_arg = {'device': getattr(self.device, "id", None)}
|
||||
self.custom_arg_id = {'device_id': getattr(self.device, "id", None)}
|
||||
self.intf_type = "interface_id"
|
||||
self.assigned_object_type = "dcim.interface"
|
||||
|
||||
def get_network_type(self):
|
||||
return 'server'
|
||||
|
@ -485,7 +515,7 @@ class ServerNetwork(Network):
|
|||
return nb_server_interface
|
||||
|
||||
try:
|
||||
nb_switch = nb_mgmt_ip.interface.device
|
||||
nb_switch = nb_mgmt_ip.assigned_object.device
|
||||
logging.info('Found a switch in Netbox based on LLDP infos: {} (id: {})'.format(
|
||||
switch_ip,
|
||||
nb_switch.id
|
||||
|
@ -585,6 +615,8 @@ class VirtualNetwork(Network):
|
|||
self.nb_net = nb.virtualization
|
||||
self.custom_arg = {'virtual_machine': getattr(self.device, "id", None)}
|
||||
self.custom_arg_id = {'virtual_machine_id': getattr(self.device, "id", None)}
|
||||
self.intf_type = "vminterface_id"
|
||||
self.assigned_object_type = "virtualization.vminterface"
|
||||
|
||||
dcim_c = nb.virtualization.interfaces.choices()
|
||||
for _choice_type in dcim_c:
|
||||
|
|
|
@ -51,7 +51,7 @@ class PowerSupply():
|
|||
)
|
||||
|
||||
def create_or_update_power_supply(self):
|
||||
nb_psus = self.get_netbox_power_supply()
|
||||
nb_psus = list(self.get_netbox_power_supply())
|
||||
psus = self.get_power_supply()
|
||||
|
||||
# Delete unknown PSU
|
||||
|
@ -105,18 +105,20 @@ class PowerSupply():
|
|||
return False
|
||||
|
||||
# find power feeds for rack or dc
|
||||
voltage = None
|
||||
pwr_feeds = None
|
||||
if self.netbox_server.rack:
|
||||
pwr_feeds = nb.dcim.power_feeds.filter(
|
||||
rack=self.netbox_server.rack.id
|
||||
)
|
||||
if pwr_feeds is None or not len(pwr_feeds):
|
||||
|
||||
if pwr_feeds:
|
||||
voltage = [p['voltage'] for p in pwr_feeds]
|
||||
else:
|
||||
logging.info('Could not find power feeds for Rack, defaulting value to 230')
|
||||
voltage = 230
|
||||
voltage = [230 for _ in nb_psus]
|
||||
|
||||
for i, nb_psu in enumerate(nb_psus):
|
||||
nb_psu.allocated_draw = float(psu_cons[i]) * voltage
|
||||
nb_psu.allocated_draw = int(float(psu_cons[i]) * voltage[i])
|
||||
if nb_psu.allocated_draw < 1:
|
||||
logging.info('PSU is not connected or in standby mode')
|
||||
continue
|
||||
|
|
|
@ -1,13 +1,20 @@
|
|||
import re
|
||||
import subprocess
|
||||
|
||||
from netbox_agent.config import config
|
||||
from netbox_agent.misc import get_vendor
|
||||
from netbox_agent.raid.base import Raid, RaidController
|
||||
from netbox_agent.misc import get_vendor
|
||||
from netbox_agent.config import config
|
||||
import subprocess
|
||||
import logging
|
||||
import re
|
||||
|
||||
REGEXP_CONTROLLER_HP = re.compile(r'Smart Array ([a-zA-Z0-9- ]+) in Slot ([0-9]+)')
|
||||
|
||||
|
||||
def ssacli(command):
|
||||
output = subprocess.getoutput('ssacli {}'.format(command))
|
||||
lines = output.split('\n')
|
||||
lines = list(filter(None, lines))
|
||||
return lines
|
||||
|
||||
|
||||
def _parse_ctrl_output(lines):
|
||||
controllers = {}
|
||||
current_ctrl = None
|
||||
|
@ -18,11 +25,11 @@ def _parse_ctrl_output(lines):
|
|||
ctrl = REGEXP_CONTROLLER_HP.search(line)
|
||||
if ctrl is not None:
|
||||
current_ctrl = ctrl.group(1)
|
||||
controllers[current_ctrl] = {"Slot": ctrl.group(2)}
|
||||
if "Embedded" not in line:
|
||||
controllers[current_ctrl]["External"] = True
|
||||
controllers[current_ctrl] = {'Slot': ctrl.group(2)}
|
||||
if 'Embedded' not in line:
|
||||
controllers[current_ctrl]['External'] = True
|
||||
continue
|
||||
attr, val = line.split(": ", 1)
|
||||
attr, val = line.split(': ', 1)
|
||||
attr = attr.strip()
|
||||
val = val.strip()
|
||||
controllers[current_ctrl][attr] = val
|
||||
|
@ -39,27 +46,54 @@ def _parse_pd_output(lines):
|
|||
if not line or line.startswith('Note:'):
|
||||
continue
|
||||
# Parses the Array the drives are in
|
||||
if line.startswith("Array"):
|
||||
if line.startswith('Array'):
|
||||
current_array = line.split(None, 1)[1]
|
||||
# Detects new physical drive
|
||||
if line.startswith("physicaldrive"):
|
||||
if line.startswith('physicaldrive'):
|
||||
current_drv = line.split(None, 1)[1]
|
||||
drives[current_drv] = {}
|
||||
if current_array is not None:
|
||||
drives[current_drv]["Array"] = current_array
|
||||
drives[current_drv]['Array'] = current_array
|
||||
continue
|
||||
if ": " not in line:
|
||||
if ': ' not in line:
|
||||
continue
|
||||
attr, val = line.split(": ", 1)
|
||||
attr, val = line.split(': ', 1)
|
||||
drives.setdefault(current_drv, {})[attr] = val
|
||||
return drives
|
||||
|
||||
|
||||
def _parse_ld_output(lines):
|
||||
drives = {}
|
||||
current_array = None
|
||||
current_drv = None
|
||||
|
||||
for line in lines:
|
||||
line = line.strip()
|
||||
if not line or line.startswith('Note:'):
|
||||
continue
|
||||
# Parses the Array the drives are in
|
||||
if line.startswith('Array'):
|
||||
current_array = line.split(None, 1)[1]
|
||||
drives[current_array] = {}
|
||||
# Detects new logical drive
|
||||
if line.startswith('Logical Drive'):
|
||||
current_drv = line.split(': ', 1)[1]
|
||||
drives.setdefault(current_array, {})['LogicalDrive'] = current_drv
|
||||
continue
|
||||
if ': ' not in line:
|
||||
continue
|
||||
attr, val = line.split(': ', 1)
|
||||
drives.setdefault(current_array, {})[attr] = val
|
||||
return drives
|
||||
|
||||
|
||||
class HPRaidController(RaidController):
|
||||
def __init__(self, controller_name, data):
|
||||
self.controller_name = controller_name
|
||||
self.data = data
|
||||
self.drives = self._get_physical_disks()
|
||||
self.pdrives = self._get_physical_disks()
|
||||
self.ldrives = self._get_logical_drives()
|
||||
self._get_virtual_drives_map()
|
||||
|
||||
def get_product_name(self):
|
||||
return self.controller_name
|
||||
|
@ -77,15 +111,12 @@ class HPRaidController(RaidController):
|
|||
return self.data.get('External', False)
|
||||
|
||||
def _get_physical_disks(self):
|
||||
output = subprocess.getoutput(
|
||||
'ssacli ctrl slot={slot} pd all show detail'.format(slot=self.data['Slot'])
|
||||
)
|
||||
lines = output.split('\n')
|
||||
lines = list(filter(None, lines))
|
||||
drives = _parse_pd_output(lines)
|
||||
ret = []
|
||||
lines = ssacli('ctrl slot={} pd all show detail'.format(self.data['Slot']))
|
||||
pdrives = _parse_pd_output(lines)
|
||||
ret = {}
|
||||
|
||||
for name, attrs in drives.items():
|
||||
for name, attrs in pdrives.items():
|
||||
array = attrs.get('Array', '')
|
||||
model = attrs.get('Model', '').strip()
|
||||
vendor = None
|
||||
if model.startswith('HP'):
|
||||
|
@ -95,7 +126,8 @@ class HPRaidController(RaidController):
|
|||
else:
|
||||
vendor = get_vendor(model)
|
||||
|
||||
ret.append({
|
||||
ret[name] = {
|
||||
'Array': array,
|
||||
'Model': model,
|
||||
'Vendor': vendor,
|
||||
'SN': attrs.get('Serial Number', '').strip(),
|
||||
|
@ -103,11 +135,40 @@ class HPRaidController(RaidController):
|
|||
'Type': 'SSD' if attrs.get('Interface Type') == 'Solid State SATA'
|
||||
else 'HDD',
|
||||
'_src': self.__class__.__name__,
|
||||
})
|
||||
}
|
||||
return ret
|
||||
|
||||
def _get_logical_drives(self):
|
||||
lines = ssacli('ctrl slot={} ld all show detail'.format(self.data['Slot']))
|
||||
ldrives = _parse_ld_output(lines)
|
||||
ret = {}
|
||||
|
||||
for array, attrs in ldrives.items():
|
||||
ret[array] = {
|
||||
'vd_array': array,
|
||||
'vd_size': attrs['Size'],
|
||||
'vd_consistency': attrs['Status'],
|
||||
'vd_raid_type': 'RAID {}'.format(attrs['Fault Tolerance']),
|
||||
'vd_device': attrs['LogicalDrive'],
|
||||
'mount_point': attrs['Mount Points']
|
||||
}
|
||||
return ret
|
||||
|
||||
def _get_virtual_drives_map(self):
|
||||
for name, attrs in self.pdrives.items():
|
||||
array = attrs["Array"]
|
||||
ld = self.ldrives.get(array)
|
||||
if ld is None:
|
||||
logging.error(
|
||||
"Failed to find array information for physical drive {}."
|
||||
" Ignoring.".format(name)
|
||||
)
|
||||
continue
|
||||
attrs['custom_fields'] = ld
|
||||
attrs['custom_fields']['pd_identifier'] = name
|
||||
|
||||
def get_physical_disks(self):
|
||||
return self.drives
|
||||
return list(self.pdrives.values())
|
||||
|
||||
|
||||
class HPRaid(Raid):
|
||||
|
|
|
@ -1,25 +1,32 @@
|
|||
import re
|
||||
import subprocess
|
||||
import xml.etree.ElementTree as ET # NOQA
|
||||
|
||||
from netbox_agent.misc import get_vendor
|
||||
from netbox_agent.raid.base import Raid, RaidController
|
||||
|
||||
# Inspiration from https://github.com/asciiphil/perc-status/blob/master/perc-status
|
||||
from netbox_agent.misc import get_vendor, get_mount_points
|
||||
from netbox_agent.config import config
|
||||
import subprocess
|
||||
import logging
|
||||
import re
|
||||
|
||||
|
||||
def get_field(obj, fieldname):
|
||||
f = obj.find(fieldname)
|
||||
if f is None:
|
||||
return None
|
||||
if f.attrib['type'] in ['u32', 'u64']:
|
||||
if re.search('Mask$', fieldname):
|
||||
return int(f.text, 2)
|
||||
else:
|
||||
return int(f.text)
|
||||
if f.attrib['type'] == 'astring':
|
||||
return f.text
|
||||
return f.text
|
||||
def omreport(sub_command):
|
||||
command = 'omreport {}'.format(sub_command)
|
||||
output = subprocess.getoutput(command)
|
||||
res = {}
|
||||
section_re = re.compile('^[A-Z]')
|
||||
current_section = None
|
||||
current_obj = None
|
||||
|
||||
for line in output.split('\n'):
|
||||
if ': ' in line:
|
||||
attr, value = line.split(': ', 1)
|
||||
attr = attr.strip()
|
||||
value = value.strip()
|
||||
if attr == 'ID':
|
||||
obj = {}
|
||||
res.setdefault(current_section, []).append(obj)
|
||||
current_obj = obj
|
||||
current_obj[attr] = value
|
||||
elif section_re.search(line) is not None:
|
||||
current_section = line.strip()
|
||||
return res
|
||||
|
||||
|
||||
class OmreportController(RaidController):
|
||||
|
@ -28,49 +35,88 @@ class OmreportController(RaidController):
|
|||
self.controller_index = controller_index
|
||||
|
||||
def get_product_name(self):
|
||||
return get_field(self.data, 'Name')
|
||||
return self.data['Name']
|
||||
|
||||
def get_manufacturer(self):
|
||||
return None
|
||||
|
||||
def get_serial_number(self):
|
||||
return get_field(self.data, 'DeviceSerialNumber')
|
||||
return self.data.get('DeviceSerialNumber')
|
||||
|
||||
def get_firmware_version(self):
|
||||
return get_field(self.data, 'Firmware Version')
|
||||
return self.data.get('Firmware Version')
|
||||
|
||||
def _get_physical_disks(self):
|
||||
pds = {}
|
||||
res = omreport('storage pdisk controller={}'.format(
|
||||
self.controller_index
|
||||
))
|
||||
for pdisk in [d for d in list(res.values())[0]]:
|
||||
disk_id = pdisk['ID']
|
||||
size = re.sub('B .*$', 'B', pdisk['Capacity'])
|
||||
pds[disk_id] = {
|
||||
'Vendor': get_vendor(pdisk['Vendor ID']),
|
||||
'Model': pdisk['Product ID'],
|
||||
'SN': pdisk['Serial No.'],
|
||||
'Size': size,
|
||||
'Type': pdisk['Media'],
|
||||
'_src': self.__class__.__name__,
|
||||
}
|
||||
return pds
|
||||
|
||||
def _get_virtual_drives_map(self):
|
||||
pds = {}
|
||||
res = omreport('storage vdisk controller={}'.format(
|
||||
self.controller_index
|
||||
))
|
||||
for vdisk in [d for d in list(res.values())[0]]:
|
||||
vdisk_id = vdisk['ID']
|
||||
device = vdisk['Device Name']
|
||||
mount_points = get_mount_points()
|
||||
mp = mount_points.get(device, 'n/a')
|
||||
size = re.sub('B .*$', 'B', vdisk['Size'])
|
||||
vd = {
|
||||
'vd_array': vdisk_id,
|
||||
'vd_size': size,
|
||||
'vd_consistency': vdisk['State'],
|
||||
'vd_raid_type': vdisk['Layout'],
|
||||
'vd_device': vdisk['Device Name'],
|
||||
'mount_point': ', '.join(sorted(mp)),
|
||||
}
|
||||
drives_res = omreport(
|
||||
'storage pdisk controller={} vdisk={}'.format(
|
||||
self.controller_index, vdisk_id
|
||||
))
|
||||
for pdisk in [d for d in list(drives_res.values())[0]]:
|
||||
pds[pdisk['ID']] = vd
|
||||
return pds
|
||||
|
||||
def get_physical_disks(self):
|
||||
ret = []
|
||||
output = subprocess.getoutput(
|
||||
'omreport storage controller controller={} -fmt xml'.format(self.controller_index)
|
||||
)
|
||||
root = ET.fromstring(output)
|
||||
et_array_disks = root.find('ArrayDisks')
|
||||
if et_array_disks is not None:
|
||||
for obj in et_array_disks.findall('DCStorageObject'):
|
||||
ret.append({
|
||||
'Vendor': get_vendor(get_field(obj, 'Vendor')),
|
||||
'Model': get_field(obj, 'ProductID'),
|
||||
'SN': get_field(obj, 'DeviceSerialNumber'),
|
||||
'Size': '{:.0f}GB'.format(
|
||||
int(get_field(obj, 'Length')) / 1024 / 1024 / 1024
|
||||
),
|
||||
'Type': 'HDD' if int(get_field(obj, 'MediaType')) == 1 else 'SSD',
|
||||
'_src': self.__class__.__name__,
|
||||
})
|
||||
return ret
|
||||
pds = self._get_physical_disks()
|
||||
vds = self._get_virtual_drives_map()
|
||||
for pd_identifier, vd in vds.items():
|
||||
if pd_identifier not in pds:
|
||||
logging.error(
|
||||
'Physical drive {} listed in virtual drive {} not '
|
||||
'found in drives list'.format(
|
||||
pd_identifier, vd['vd_array']
|
||||
)
|
||||
)
|
||||
continue
|
||||
pds[pd_identifier].setdefault('custom_fields', {}).update(vd)
|
||||
pds[pd_identifier]['custom_fields']['pd_identifier'] = pd_identifier
|
||||
return list(pds.values())
|
||||
|
||||
|
||||
class OmreportRaid(Raid):
|
||||
def __init__(self):
|
||||
output = subprocess.getoutput('omreport storage controller -fmt xml')
|
||||
controller_xml = ET.fromstring(output)
|
||||
self.controllers = []
|
||||
res = omreport('storage controller')
|
||||
|
||||
for obj in controller_xml.find('Controllers').findall('DCStorageObject'):
|
||||
ctrl_index = get_field(obj, 'ControllerNum')
|
||||
for controller in res['Controller']:
|
||||
ctrl_index = controller['ID']
|
||||
self.controllers.append(
|
||||
OmreportController(ctrl_index, obj)
|
||||
OmreportController(ctrl_index, controller)
|
||||
)
|
||||
|
||||
def get_controllers(self):
|
||||
|
|
|
@ -1,8 +1,31 @@
|
|||
import json
|
||||
import subprocess
|
||||
|
||||
from netbox_agent.misc import get_vendor
|
||||
from netbox_agent.raid.base import Raid, RaidController
|
||||
from netbox_agent.misc import get_vendor, get_mount_points
|
||||
from netbox_agent.config import config
|
||||
import subprocess
|
||||
import logging
|
||||
import json
|
||||
import re
|
||||
import os
|
||||
|
||||
|
||||
def storecli(sub_command):
|
||||
command = 'storcli {} J'.format(sub_command)
|
||||
output = subprocess.getoutput(command)
|
||||
data = json.loads(output)
|
||||
controllers = dict([
|
||||
(
|
||||
c['Command Status']['Controller'],
|
||||
c['Response Data']
|
||||
) for c in data['Controllers']
|
||||
if c['Command Status']['Status'] == 'Success'
|
||||
])
|
||||
if not controllers:
|
||||
logging.error(
|
||||
"Failed to execute command '{}'. "
|
||||
"Ignoring data.".format(command)
|
||||
)
|
||||
return {}
|
||||
return controllers
|
||||
|
||||
|
||||
class StorcliController(RaidController):
|
||||
|
@ -22,52 +45,101 @@ class StorcliController(RaidController):
|
|||
def get_firmware_version(self):
|
||||
return self.data['FW Package Build']
|
||||
|
||||
def get_physical_disks(self):
|
||||
ret = []
|
||||
output = subprocess.getoutput(
|
||||
'storcli /c{}/eall/sall show all J'.format(self.controller_index)
|
||||
)
|
||||
-        drive_infos = json.loads(output)['Controllers'][self.controller_index]['Response Data']

+    def _get_physical_disks(self):
+        pds = {}
+        cmd = '/c{}/eall/sall show all'.format(self.controller_index)
+        controllers = storecli(cmd)
+        pd_info = controllers[self.controller_index]
+        pd_re = re.compile(r'^Drive (/c\d+/e\d+/s\d+)$')

-        for physical_drive in self.data['PD LIST']:
-            enclosure = physical_drive.get('EID:Slt').split(':')[0]
-            slot = physical_drive.get('EID:Slt').split(':')[1]
-            size = physical_drive.get('Size').strip()
-            media_type = physical_drive.get('Med').strip()
-            drive_identifier = 'Drive /c{}/e{}/s{}'.format(
-                str(self.controller_index), str(enclosure), str(slot)
-            )
-            drive_attr = drive_infos['{} - Detailed Information'.format(drive_identifier)][
-                '{} Device attributes'.format(drive_identifier)]
-            model = drive_attr.get('Model Number', '').strip()
-            ret.append({
+        for section, attrs in pd_info.items():
+            reg = pd_re.search(section)
+            if reg is None:
+                continue
+            pd_name = reg.group(1)
+            pd_attr = attrs[0]
+            pd_identifier = pd_attr['EID:Slt']
+            size = pd_attr.get('Size', '').strip()
+            media_type = pd_attr.get('Med', '').strip()
+            pd_details = pd_info['{} - Detailed Information'.format(section)]
+            pd_dev_attr = pd_details['{} Device attributes'.format(section)]
+            model = pd_dev_attr.get('Model Number', '').strip()
+            pd = {
                 'Model': model,
                 'Vendor': get_vendor(model),
-                'SN': drive_attr.get('SN', '').strip(),
+                'SN': pd_dev_attr.get('SN', '').strip(),
                 'Size': size,
                 'Type': media_type,
                 '_src': self.__class__.__name__,
-            })
-        return ret
+            }
+            if config.process_virtual_drives:
+                pd.setdefault('custom_fields', {})['pd_identifier'] = pd_name
+            pds[pd_identifier] = pd
+        return pds
+
+    def _get_virtual_drives_map(self):
+        vds = {}
+        cmd = '/c{}/vall show all'.format(self.controller_index)
+        controllers = storecli(cmd)
+        vd_info = controllers[self.controller_index]
+        mount_points = get_mount_points()
+
+        for vd_identifier, vd_attrs in vd_info.items():
+            if not vd_identifier.startswith("/c{}/v".format(self.controller_index)):
+                continue
+            volume = vd_identifier.split("/")[-1].lstrip("v")
+            vd_attr = vd_attrs[0]
+            vd_pd_identifier = 'PDs for VD {}'.format(volume)
+            vd_pds = vd_info[vd_pd_identifier]
+            vd_prop_identifier = 'VD{} Properties'.format(volume)
+            vd_properties = vd_info[vd_prop_identifier]
+            for pd in vd_pds:
+                pd_identifier = pd["EID:Slt"]
+                wwn = vd_properties["SCSI NAA Id"]
+                wwn_path = "/dev/disk/by-id/wwn-0x{}".format(wwn)
+                device = os.path.realpath(wwn_path)
+                mp = mount_points.get(device, "n/a")
+                vds[pd_identifier] = {
+                    "vd_array": vd_identifier,
+                    "vd_size": vd_attr["Size"],
+                    "vd_consistency": vd_attr["Consist"],
+                    "vd_raid_type": vd_attr["TYPE"],
+                    "vd_device": device,
+                    "mount_point": ", ".join(sorted(mp))
+                }
+        return vds
+
+    def get_physical_disks(self):
+        # Parses physical disks information
+        pds = self._get_physical_disks()
+
+        # Parses virtual drives information and maps them to physical disks
+        vds = self._get_virtual_drives_map()
+        for pd_identifier, vd in vds.items():
+            if pd_identifier not in pds:
+                logging.error(
+                    "Physical drive {} listed in virtual drive {} not "
+                    "found in drives list".format(
+                        pd_identifier, vd["vd_array"]
+                    )
+                )
+                continue
+            pds[pd_identifier].setdefault("custom_fields", {}).update(vd)
+
+        return list(pds.values())


 class StorcliRaid(Raid):
     def __init__(self):
-        self.output = subprocess.getoutput('storcli /call show J')
-        self.data = json.loads(self.output)
         self.controllers = []

-        if len([
-            x for x in self.data['Controllers']
-            if x['Command Status']['Status'] == 'Success'
-        ]) > 0:
-            for controller in self.data['Controllers']:
-                self.controllers.append(
-                    StorcliController(
-                        controller['Command Status']['Controller'],
-                        controller['Response Data']
-                    )
+        controllers = storecli('/call show')
+        for controller_id, controller_data in controllers.items():
+            self.controllers.append(
+                StorcliController(
+                    controller_id,
+                    controller_data
                 )
+            )

     def get_controllers(self):
         return self.controllers
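Read together, the three methods above split the work: `_get_physical_disks()` builds a dict of disks keyed by their `EID:Slt` identifier, `_get_virtual_drives_map()` maps those same identifiers to virtual-drive attributes, and `get_physical_disks()` merges the second into the first via `custom_fields`. Below is a standalone sketch of that merge step; the controller data is invented, only the dict shapes follow the code above.

```python
# Standalone sketch of the merge in get_physical_disks() above.
# The disk models, serials and device paths are invented sample values.
import logging

pds = {
    "8:0": {"Model": "SAMPLE-SSD-480", "SN": "SN0001", "Size": "447.130 GB", "Type": "SSD"},
    "8:1": {"Model": "SAMPLE-SSD-480", "SN": "SN0002", "Size": "447.130 GB", "Type": "SSD"},
}
vds = {
    "8:0": {"vd_array": "/c0/v0", "vd_raid_type": "RAID1", "vd_device": "/dev/sda", "mount_point": "/"},
    "8:9": {"vd_array": "/c0/v1", "vd_raid_type": "RAID0", "vd_device": "/dev/sdb", "mount_point": "n/a"},
}

for pd_identifier, vd in vds.items():
    if pd_identifier not in pds:
        # mirrors the error path above: a VD member that has no matching PD entry
        logging.error(
            "Physical drive %s listed in virtual drive %s not found in drives list",
            pd_identifier, vd["vd_array"],
        )
        continue
    pds[pd_identifier].setdefault("custom_fields", {}).update(vd)

print(list(pds.values()))
```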
netbox_agent/server.py

@@ -1,9 +1,3 @@
-import logging
-import socket
-import subprocess
-import sys
-from pprint import pprint
-
 import netbox_agent.dmidecode as dmidecode
 from netbox_agent.config import config
 from netbox_agent.config import netbox_instance as nb

@@ -12,6 +6,11 @@ from netbox_agent.location import Datacenter, Rack, Tenant
 from netbox_agent.misc import create_netbox_tags, get_device_role, get_device_type
 from netbox_agent.network import ServerNetwork
 from netbox_agent.power import PowerSupply
+from pprint import pprint
+import subprocess
+import logging
+import socket
+import sys


 class ServerBase():
@@ -25,13 +24,22 @@ class ServerBase():
         self.bios = dmidecode.get_by_type(self.dmi, 'BIOS')
         self.chassis = dmidecode.get_by_type(self.dmi, 'Chassis')
         self.system = dmidecode.get_by_type(self.dmi, 'System')
-        self.inventory = Inventory(server=self)

         self.network = None

-        self.tags = list(set(config.device.tags.split(','))) if config.device.tags else []
-        if self.tags and len(self.tags):
-            create_netbox_tags(self.tags)
+        self.tags = list(set([
+            x.strip() for x in config.device.tags.split(',') if x.strip()
+        ])) if config.device.tags else []
+        self.nb_tags = list(create_netbox_tags(self.tags))
+        config_cf = set([
+            f.strip() for f in config.device.custom_fields.split(",")
+            if f.strip()
+        ])
+        self.custom_fields = {}
+        self.custom_fields.update(dict([
+            (k.strip(), v.strip()) for k, v in
+            [f.split("=", 1) for f in config_cf]
+        ]))

     def get_tenant(self):
         tenant = Tenant()
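The comprehensions added above derive `self.custom_fields` from the comma-delimited `device.custom_fields` setting, with each entry expected as `key=value`. A standalone sketch of that parsing, using an invented sample value:

```python
# Standalone sketch of the custom_fields parsing above; "raw" is a made-up
# sample of the comma-delimited key=value configuration string.
raw = "field1=value1, field2=value2"

config_cf = set(f.strip() for f in raw.split(",") if f.strip())
custom_fields = {}
custom_fields.update(dict(
    (k.strip(), v.strip()) for k, v in
    (f.split("=", 1) for f in config_cf)
))

print(custom_fields)  # {'field1': 'value1', 'field2': 'value2'} (order may vary)
```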
@@ -194,7 +202,8 @@ class ServerBase():
             site=datacenter.id if datacenter else None,
             tenant=tenant.id if tenant else None,
             rack=rack.id if rack else None,
-            tags=self.tags,
+            tags=[{'name': x} for x in self.tags],
+            custom_fields=self.custom_fields,
         )
         return new_chassis

@@ -216,7 +225,8 @@ class ServerBase():
             site=datacenter.id if datacenter else None,
             tenant=tenant.id if tenant else None,
             rack=rack.id if rack else None,
-            tags=self.tags,
+            tags=[{'name': x} for x in self.tags],
+            custom_fields=self.custom_fields,
         )
         return new_blade

@@ -238,7 +248,7 @@ class ServerBase():
             site=datacenter.id if datacenter else None,
             tenant=tenant.id if tenant else None,
             rack=rack.id if rack else None,
-            tags=self.tags,
+            tags=[{'name': x} for x in self.tags],
         )
         return new_blade

@@ -266,7 +276,7 @@ class ServerBase():
             site=datacenter.id if datacenter else None,
             tenant=tenant.id if tenant else None,
             rack=rack.id if rack else None,
-            tags=self.tags,
+            tags=[{'name': x} for x in self.tags],
         )
         return new_server
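In the four create calls above, the tag payload switches from a bare list of names to a list of objects, the shape newer NetBox/pynetbox versions accept for writable tags, and the chassis and blade calls also start sending the parsed custom fields. A minimal illustration of the two payload shapes; the device name and tags are invented and no NetBox call is involved:

```python
# Illustration only: payload shape for tags before and after the change above.
tags = ["server", "blade"]

legacy_payload = {"name": "sample-blade-01", "tags": tags}
current_payload = {"name": "sample-blade-01", "tags": [{"name": x} for x in tags]}

print(legacy_payload)   # {'name': 'sample-blade-01', 'tags': ['server', 'blade']}
print(current_payload)  # {'name': 'sample-blade-01', 'tags': [{'name': 'server'}, {'name': 'blade'}]}
```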
@@ -278,8 +288,10 @@ class ServerBase():

     def _netbox_set_or_update_blade_slot(self, server, chassis, datacenter):
         # before everything check if right chassis
-        actual_device_bay = server.parent_device.device_bay if server.parent_device else None
-        actual_chassis = actual_device_bay.device if actual_device_bay else None
+        actual_device_bay = server.parent_device.device_bay \
+            if server.parent_device else None
+        actual_chassis = actual_device_bay.device \
+            if actual_device_bay else None
         slot = self.get_blade_slot()
         if actual_chassis and \
            actual_chassis.serial == chassis.serial and \
@@ -290,7 +302,11 @@ class ServerBase():
             device_id=chassis.id,
             name=slot,
         )
-        if len(real_device_bays) > 0:
+        real_device_bays = nb.dcim.device_bays.filter(
+            device_id=chassis.id,
+            name=slot,
+        )
+        if real_device_bays:
             logging.info(
                 'Setting device ({serial}) new slot on {slot} '
                 '(Chassis {chassis_serial})..'.format(
@@ -298,10 +314,14 @@ class ServerBase():
                 ))
             # reset actual device bay if set
             if actual_device_bay:
+                # Forces the evaluation of the installed_device attribute to
+                # workaround a bug probably due to lazy loading optimization
+                # that prevents the value change detection
+                actual_device_bay.installed_device
                 actual_device_bay.installed_device = None
                 actual_device_bay.save()
             # setup new device bay
-            real_device_bay = real_device_bays[0]
+            real_device_bay = next(real_device_bays)
             real_device_bay.installed_device = server
             real_device_bay.save()
         else:
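The device-bay lookup above stops treating the `.filter()` result like a list: the emptiness check drops `len()`, and the first record is taken with `next()` instead of `[0]`, consistent with the result set being consumed as an iterator in the pynetbox version this PR moves to. A stand-in sketch of the pattern, with a plain generator playing the role of `nb.dcim.device_bays.filter()`; all record values are invented:

```python
# Stand-in for the device-bay lookup above: fake_filter mimics an endpoint
# .filter() call by yielding matching records from an in-memory list.
def fake_filter(records, **params):
    return (r for r in records if all(r.get(k) == v for k, v in params.items()))

bays = [{"device_id": 12, "name": "Bay 3", "installed_device": None}]

real_device_bays = fake_filter(bays, device_id=12, name="Bay 3")
real_device_bay = next(real_device_bays, None)   # replaces real_device_bays[0]
if real_device_bay is not None:
    real_device_bay["installed_device"] = "sample-blade-serial"
print(real_device_bay)
```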
@@ -323,7 +343,7 @@ class ServerBase():
             device_id=chassis.id,
             name=slot,
         )
-        if len(real_device_bays) == 0:
+        if not real_device_bays:
             logging.error('Could not find slot {slot} expansion for chassis'.format(
                 slot=slot
             ))
@@ -335,10 +355,14 @@ class ServerBase():
                 ))
             # reset actual device bay if set
             if actual_device_bay:
+                # Forces the evaluation of the installed_device attribute to
+                # workaround a bug probably due to lazy loading optimization
+                # that prevents the value change detection
+                actual_device_bay.installed_device
                 actual_device_bay.installed_device = None
                 actual_device_bay.save()
             # setup new device bay
-            real_device_bay = real_device_bays[0]
+            real_device_bay = next(real_device_bays)
             real_device_bay.installed_device = expansion
             real_device_bay.save()

@@ -388,6 +412,7 @@ class ServerBase():
         update_inventory = config.inventory and (config.register or
                                                  config.update_all or config.update_inventory)
         # update inventory if feature is enabled
+        self.inventory = Inventory(server=self)
         if update_inventory:
             self.inventory.create_or_update()
         # update psu
@@ -416,12 +441,15 @@ class ServerBase():
         # for every other specs
         # check hostname
         if server.name != self.get_hostname():
-            update += 1
             server.name = self.get_hostname()
+            update += 1

+        if sorted(set([x.name for x in server.tags])) != sorted(set(self.tags)):
+            server.tags = [x.id for x in self.nb_tags]
+            update += 1

-        if sorted(set(server.tags)) != sorted(set(self.tags)):
-            server.tags = self.tags
+        if server.custom_fields != self.custom_fields:
+            server.custom_fields = self.custom_fields
             update += 1

         if config.update_all or config.update_location:
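The hunk above extends the idempotent update loop: each attribute is compared against the desired value, rewritten only when it differs, and the `update` counter records whether anything changed; tag names (via `self.nb_tags`) and custom fields now take part in that comparison. A compact sketch of the same compare-and-count pattern on a plain object, with invented values:

```python
# Compact sketch of the compare-and-count update pattern above, applied to a
# plain object instead of a pynetbox record (all values are invented).
class FakeServer:
    name = "old-name"
    tags = ["server"]
    custom_fields = {"rack_u": "2"}

server = FakeServer()
desired_name = "new-name"
desired_tags = ["server", "blade"]
desired_cf = {"rack_u": "4"}

update = 0
if server.name != desired_name:
    server.name = desired_name
    update += 1
if sorted(set(server.tags)) != sorted(set(desired_tags)):
    server.tags = desired_tags
    update += 1
if server.custom_fields != desired_cf:
    server.custom_fields = desired_cf
    update += 1

print(update)  # 3 -> something changed, so a save would be needed
```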
@@ -458,3 +486,21 @@ class ServerBase():
         print('NIC:',)
         pprint(self.network.get_network_cards())
         pass
+
+    def own_expansion_slot(self):
+        """
+        Indicates if the device hosts an expansion card
+        """
+        return False
+
+    def own_gpu_expansion_slot(self):
+        """
+        Indicates if the device hosts a GPU expansion card
+        """
+        return False
+
+    def own_drive_expansion_slot(self):
+        """
+        Indicates if the device hosts a drive expansion bay
+        """
+        return False
netbox_agent/vendors/dell.py (vendored, 7 changed lines)

@@ -86,10 +86,3 @@ class DellHost(ServerBase):
         Expansion slot are always the compute bay number + 1
         """
         raise NotImplementedError
-
-    def own_expansion_slot(self):
-        """
-        Say if the device can host an extension card based
-        on the product name
-        """
-        pass
netbox_agent/vendors/generic.py (vendored, 28 changed lines)

@@ -8,7 +8,7 @@ class GenericHost(ServerBase):
         self.manufacturer = dmidecode.get_by_type(self.dmi, 'Baseboard')[0].get('Manufacturer')

     def is_blade(self):
-        return None
+        return False

     def get_blade_slot(self):
         return None
@@ -21,29 +21,3 @@ class GenericHost(ServerBase):

     def get_chassis_service_tag(self):
         return self.get_service_tag()
-
-    def get_expansion_product(self):
-        """
-        Get the extension slot that is on a pair slot number
-        next to the compute slot that is on an odd slot number
-        """
-        raise NotImplementedError
-
-    def is_expansion_slot(self, server):
-        """
-        Return True if its an extension slot
-        """
-        raise NotImplementedError
-
-    def get_blade_expansion_slot(self):
-        """
-        Expansion slot are always the compute bay number + 1
-        """
-        raise NotImplementedError
-
-    def own_expansion_slot(self):
-        """
-        Say if the device can host an extension card based
-        on the product name
-        """
-        pass
netbox_agent/vendors/hp.py (vendored, 21 changed lines)

@@ -1,5 +1,6 @@
 import netbox_agent.dmidecode as dmidecode
 from netbox_agent.server import ServerBase
+from netbox_agent.inventory import Inventory


 class HPHost(ServerBase):
@@ -12,8 +13,8 @@ class HPHost(ServerBase):

     def is_blade(self):
         blade = self.product.startswith("ProLiant BL")
-        blade |= (self.product.startswith("ProLiant m")
-                  and self.product.endswith("Server Cartridge"))
+        blade |= self.product.startswith("ProLiant m") and \
+            self.product.endswith("Server Cartridge")
         return blade

     def _find_rack_locator(self):
@@ -92,24 +93,28 @@ class HPHost(ServerBase):

     def own_expansion_slot(self):
         """
-        Say if the device can host an extension card based
-        on the product name
+        Indicates if the device hosts an expension card
         """
         return self.own_gpu_expansion_slot() or self.own_disk_expansion_slot()

     def own_gpu_expansion_slot(self):
         """
-        Say if the device can host an extension card based
+        Indicates if the device hosts a GPU expansion card based
         on the product name
         """
         return self.get_product_name().endswith('Graphics Exp')

     def own_disk_expansion_slot(self):
         """
-        Say if the device can host an extension card based
-        on the product name
+        Indicates if the device hosts a drive expansion card based
+        on raid card attributes.
         """
-        for raid_card in self.inventory.get_raid_cards():
+        # Uses already parsed inventory if available
+        # parses it otherwise
+        inventory = getattr(self, "inventory", None)
+        if inventory is None:
+            inventory = Inventory(self)
+        for raid_card in inventory.get_raid_cards():
             if self.is_blade() and raid_card.is_external():
                 return True
         return False
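`own_disk_expansion_slot()` above now flags a drive expansion only when the host is a blade and one of its RAID cards is external, reusing an already-parsed inventory when the server has one. A standalone sketch of that check, with an invented `FakeRaidCard` standing in for the agent's raid-card objects:

```python
# Sketch of the drive-expansion check above; FakeRaidCard and the is_blade
# argument are stand-ins invented for this example.
class FakeRaidCard:
    def __init__(self, external):
        self._external = external

    def is_external(self):
        return self._external

def own_disk_expansion_slot(is_blade, raid_cards):
    for raid_card in raid_cards:
        if is_blade and raid_card.is_external():
            return True
    return False

print(own_disk_expansion_slot(True, [FakeRaidCard(external=True)]))    # True
print(own_disk_expansion_slot(True, [FakeRaidCard(external=False)]))   # False
print(own_disk_expansion_slot(False, [FakeRaidCard(external=True)]))   # False
```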
netbox_agent/vendors/supermicro.py (vendored, 19 changed lines)

@@ -77,22 +77,3 @@ class SupermicroHost(ServerBase):
         I only know on model of slot GPU extension card that.
         """
         raise NotImplementedError
-
-    def is_expansion_slot(self, server):
-        """
-        Return True if its an extension slot, based on the name
-        """
-        raise NotImplementedError
-
-    def get_blade_expansion_slot(self):
-        """
-        Expansion slot are always the compute bay number + 1
-        """
-        raise NotImplementedError
-
-    def own_expansion_slot(self):
-        """
-        Say if the device can host an extension card based
-        on the product name
-        """
-        pass
requirements.txt

@@ -1,5 +1,7 @@
-pynetbox==5.0.5
+pynetbox==6.1.2
 netaddr==0.8.0
 netifaces==0.10.9
 pyyaml==5.4.1
 jsonargparse==3.11.2
+python-slugify==5.0.2
+packaging==20.9
rpmenv.json (new file, 27 lines)

@@ -0,0 +1,27 @@
+{
+    "extensions": {
+        "enabled": ["python_venv", "blocks"]
+    },
+    "core": {
+        "group": "Application/System",
+        "license": "Apache2",
+        "name": "netbox-agent",
+        "summary": "NetBox agent for server",
+        "url": "https://github.com/Solvik/netbox-agent",
+        "version": "0.7.0",
+        "requires": ["lshw"]
+    },
+    "python_venv": {
+        "python": "python3.6",
+        "requirements": ["requirements.txt"],
+        "name": "netbox-agent",
+        "path": "/opt/"
+    },
+    "blocks": {
+        "post": ["ln -sf /opt/netbox-agent/bin/netbox_agent /usr/bin/netbox_agent"],
+        "desc": [
+            "This project aims to create hardware automatically into Netbox based on standard tools (dmidecode, lldpd, parsing /sys/, etc).",
+            "The goal is to generate an existing infrastructure on Netbox and have the ability to update it regularly by executing the agent."
+        ]
+    }
+}
setup.py (24 changed lines)

@@ -1,8 +1,22 @@
 from setuptools import find_packages, setup
+import os
+
+def get_requirements():
+    reqs_path = os.path.join(
+        os.path.dirname(__file__),
+        'requirements.txt'
+    )
+    with open(reqs_path, 'r') as f:
+        reqs = [
+            r.strip() for r in f
+            if r.strip()
+        ]
+    return reqs
+

 setup(
     name='netbox_agent',
-    version='0.6.2',
+    version='0.7.0',
     description='NetBox agent for server',
     long_description=open('README.md', encoding="utf-8").read(),
     long_description_content_type='text/markdown',
@@ -13,13 +27,7 @@ setup(
     include_package_data=True,
     packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
     use_scm_version=True,
-    install_requires=[
-        'pynetbox==5.0.5',
-        'netaddr==0.8.0',
-        'netifaces==0.10.9',
-        'pyyaml==5.4.1',
-        'jsonargparse==2.32.2',
-    ],
+    install_requires=get_requirements(),
     zip_safe=False,
     keywords=['netbox'],
     classifiers=[
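`setup()` now takes `install_requires` from `get_requirements()`, so the version pins live only in requirements.txt. A quick standalone check of the same parsing, fed a temporary file containing the pins shown in the requirements hunk above rather than a real checkout:

```python
# Standalone check of get_requirements()-style parsing, using a temporary file
# with the same pins as the requirements.txt hunk above.
import os
import tempfile

content = """pynetbox==6.1.2
netaddr==0.8.0
netifaces==0.10.9
pyyaml==5.4.1
jsonargparse==3.11.2
python-slugify==5.0.2
packaging==20.9
"""

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(content)
    path = f.name

with open(path, "r") as f:
    reqs = [r.strip() for r in f if r.strip()]

os.unlink(path)
print(reqs)  # ['pynetbox==6.1.2', 'netaddr==0.8.0', ..., 'packaging==20.9']
```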