API Endpoints to submit arpnip and macsuck results (#942)
* Add macsuck worker to collect various PortAccessEntity (NAC) attributes
* Incorporate PAE feedback on #937
* missing Result/Device.pm column added
* pae_is... columns instead of pae_capabilities
* moved most code to Util/PortAccessEntity.pm so the update can be done in discover and macsuck
* Refactor PAE attributes during discover as separate Plugin
* PortAccessEntity: don't use device->dns in log string
* Fix "Experimental keys on scalar is now forbidden" test failure
* Revamp pae_control and add missing attribute
  - device.pae_control (text) is now device.pae_is_enabled (bool)
  - also store pae_authconfig_port_control (port mode auto/force(un)Auth)
* Fix "Experimental keys on scalar is now forbidden" test failure
  - ... again because of botched merge
  - at least perlgolfed away a set of curly braces
* Update PortAccessEntity.pm
* Incorporate @ollyg PR feedback
* allow actions without transport to run when there are also no creds
* initial refactor for separate gather, process, store phases for macsuck
* factor out the vlan sanity check
* additional help with log of action workers
* cleanup logic in check macsuck
* refactor to make main phases only
* some fixes
* implement file slurp. amazingly the whole thing works
* remove outdated noop from test
* treat error as critical, use cancel to suppress further drivers
* big refactor to share mac sanity code to both paths
* fix inverted logic on vlan sanity filter
* some code tidy
* fix error in default value
* fix for vlan 0 nodes input from cli
* ensure imported MACs are IEEE format
* add api endpoint, no useful return status yet
* exit status if error from nodes PUT
* suppress other networked workers when direct workers are active
* better log showing worker
* fix status recording to get first error or last done message
* implement arpnip API PUT
* avoid package redeclaration error
* make sure write API methods require admin status
* add doc for passing JSON data to arpnip and macsuck
* update manifest
* remove option to do jobs in web handler; all by queue now
* use job entry timestamp for offline queued jobs
* fix store username and IP on api PUT
* never de-duplicate user-submitted jobs; never reset DeviceSkip for offline jobs
* myworker no longer needed
* make logic cleaner

Co-authored-by: Christian Ramseyer <ramseyer@netnea.com>
Changed files include: MANIFEST (+2 lines)
@@ -609,5 +609,7 @@ xt/js/run_qunit.js
 xt/lib/App/NetdiscoX/Worker/Plugin/TestFive.pm
 xt/lib/App/NetdiscoX/Worker/Plugin/TestFour.pm
 xt/lib/App/NetdiscoX/Worker/Plugin/TestOne.pm
+xt/lib/App/NetdiscoX/Worker/Plugin/TestSeven.pm
+xt/lib/App/NetdiscoX/Worker/Plugin/TestSix.pm
 xt/lib/App/NetdiscoX/Worker/Plugin/TestThree.pm
 xt/lib/App/NetdiscoX/Worker/Plugin/TestTwo.pm
@@ -236,6 +236,11 @@ Run a macsuck on the device (specified with C<-d>).

  ~/bin/netdisco-do macsuck -d 192.0.2.1

+Submit macsuck results directly to Netdisco by putting the JSON data in a
+file and using the C<-p> option (see API web docs for data format example):
+
+ ~/bin/netdisco-do macsuck -d 192.0.2.1 -p /tmp/mac-address-table.json
+
 =head2 macwalk

 Queue a macsuck for all known devices.
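An illustrative sketch of what such a file could contain, following the node
tuple schema (port, vlan, mac) declared by the new swagger path later in this
diff; the port names, VLANs and MAC addresses here are made-up sample values:

 [
   { "port": "GigabitEthernet1/0/1", "vlan": 100, "mac": "00:11:22:33:44:55" },
   { "port": "GigabitEthernet1/0/2", "vlan": 200, "mac": "66:77:88:99:aa:bb" }
 ]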
@@ -246,6 +251,11 @@ Run an arpnip on the device (specified with C<-d>).

  ~/bin/netdisco-do arpnip -d 192.0.2.1

+Submit arpnip results directly to Netdisco by putting the JSON data in a
+file and using the C<-p> option (see API web docs for data format example):
+
+ ~/bin/netdisco-do arpnip -d 192.0.2.1 -p /tmp/arp-table.json
+
 =head2 arpwalk

 Queue an arpnip for all known devices.
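An illustrative sketch of the arp table file contents, following the arp tuple
schema added by the new swagger path later in this diff (mac and ip are
required, dns is optional since Netdisco resolves IPs to FQDN anyway); the
addresses are made-up sample values:

 [
   { "mac": "00:11:22:33:44:55", "ip": "192.0.2.10", "dns": "host1.example.com" },
   { "mac": "66:77:88:99:aa:bb", "ip": "2001:db8::10" }
 ]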
@@ -23,6 +23,7 @@ foreach my $slot (qw/
       device_key
       job_priority
       is_cancelled
+      is_offline

       _current_phase
       _last_namespace
@@ -93,8 +94,6 @@ sub best_status {
   my $cur_level = 0;
   my $cur_status = '';

-  return Status->error()->status if $job->is_cancelled;
-
   foreach my $status (reverse @{ $job->_statuslist }) {
     next if $status->phase
       and $status->phase !~ m/^(?:early|main|store|late)$/;
@@ -116,19 +115,13 @@ Find the best status and log it into the job's C<status> and C<log> slots.

 sub finalise_status {
   my $job = shift;
   # use DDP; p $job->_statuslist;

   # fallback
   $job->status('error');
   $job->log('failed to report from any worker!');

-  my $max_level = Status->info()->level;
+  my $max_level = 0;

-  if ($job->is_cancelled and scalar @{ $job->_statuslist }) {
-    $job->status( $job->_statuslist->[-1]->status );
-    $job->log( $job->_statuslist->[-1]->log );
-    return;
-  }
-
   foreach my $status (reverse @{ $job->_statuslist }) {
     next if $status->phase
@@ -137,7 +130,12 @@ sub finalise_status {
     # done() from check phase should not be the action's done()
     next if $status->phase eq 'check' and $status->is_ok;

-    if ($status->level >= $max_level) {
+    # for done() we want the latest log message
+    # for error() (and others) we want the earliest log message
+
+    if (($max_level != Status->done()->level and $status->level >= $max_level)
+        or ($status->level > $max_level)) {
+
       $job->status( $status->status );
       $job->log( $status->log );
       $max_level = $status->level;
@@ -29,6 +29,11 @@ __PACKAGE__->result_source_instance->view_definition(<<ENDSQL
 SELECT username, 'api' AS role FROM users
   WHERE token IS NOT NULL AND token_from IS NOT NULL
     AND token_from > (EXTRACT(EPOCH FROM now()) - ?)
+UNION
+SELECT username, 'api_admin' AS role FROM users
+  WHERE token IS NOT NULL AND token_from IS NOT NULL
+    AND token_from > (EXTRACT(EPOCH FROM now()) - ?)
+    AND admin
 ENDSQL
 );

@@ -152,6 +152,9 @@ sub jq_getsome {
         { device => $job->device },
         ($job->device_key ? ({ device_key => $job->device_key }) : ()),
       ],
+      # never de-duplicate user-submitted jobs
+      username => { '=' => undef },
+      userip => { '=' => undef },
     );

     my $gone = $jobs->search({
@@ -270,7 +273,7 @@ sub jq_complete {

   try {
     schema(vars->{'tenant'})->txn_do(sub {
-      if ($job->device) {
+      if ($job->device and not $job->is_offline) {
         schema(vars->{'tenant'})->resultset('DeviceSkip')->find_or_create({
           backend => setting('workers')->{'BACKEND'}, device => $job->device,
         },{ key => 'device_skip_pkey' })->update({ deferrals => 0 });
@@ -168,7 +168,7 @@ sub store_arp {
   my ($hash_ref, $now) = @_;
   $now ||= 'now()';
   my $ip = $hash_ref->{'ip'};
-  my $mac = NetAddr::MAC->new(mac => ($hash_ref->{'node'} || ''));
+  my $mac = NetAddr::MAC->new(mac => ($hash_ref->{'node'} || $hash_ref->{'mac'} || ''));
   my $name = $hash_ref->{'dns'};

   return if !defined $mac or $mac->errstr;
@@ -5,6 +5,7 @@ use Dancer::Plugin::DBIC;
 use Dancer::Plugin::Swagger;
 use Dancer::Plugin::Auth::Extensible;

+use App::Netdisco::JobQueue 'jq_insert';
 use Try::Tiny;

 swagger_path {
@@ -151,6 +152,54 @@ swagger_path {
   return to_json [ map {$_->TO_JSON} $rows->all ];
 };

+swagger_path {
+  tags => ['Objects'],
+  path => (setting('api_base') || '').'/object/device/{ip}/nodes',
+  description => "Queue a job to store the nodes found on a given Device",
+  parameters => [
+    ip => {
+      description => 'Canonical IP of the Device. Use Search methods to find this.',
+      required => 1,
+      in => 'path',
+    },
+    nodes => {
+      description => 'List of node tuples (port, VLAN, MAC)',
+      default => '[]',
+      schema => {
+        type => 'array',
+        items => {
+          type => 'object',
+          properties => {
+            port => {
+              type => 'string'
+            },
+            vlan => {
+              type => 'integer',
+              default => '1'
+            },
+            mac => {
+              type => 'string'
+            }
+          }
+        }
+      },
+      in => 'body',
+    },
+  ],
+  responses => { default => {} },
+}, put '/api/v1/object/device/:ip/nodes' => require_role api_admin => sub {
+
+  jq_insert([{
+    action => 'macsuck',
+    device => params->{ip},
+    subaction => request->body,
+    username => session('logged_in_user'),
+    userip => request->remote_address,
+  }]);
+
+  return to_json {};
+};
+
 swagger_path {
   tags => ['Objects'],
   path => (setting('api_base') || '').'/object/vlan/{vlan}/nodes',
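A hedged usage sketch for the endpoint added above: the route requires the
api_admin role, the body is the same JSON list of node tuples as the CLI file,
and the request is queued as an offline macsuck job. The hostname and token
are placeholders; the token is assumed to come from the usual API login flow.

 curl -X PUT \
   -H 'Authorization: <api-token>' -H 'Content-Type: application/json' \
   --data-binary @/tmp/mac-address-table.json \
   'https://netdisco.example.com/api/v1/object/device/192.0.2.1/nodes'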
@@ -178,4 +227,54 @@ swagger_path {
   return to_json [ map {$_->TO_JSON} $rows->all ];
 };

+swagger_path {
+  tags => ['Objects'],
+  path => (setting('api_base') || '').'/object/device/{ip}/arps',
+  description => "Queue a job to store the ARP entries found on a given Device",
+  parameters => [
+    ip => {
+      description => 'Canonical IP of the Device. Use Search methods to find this.',
+      required => 1,
+      in => 'path',
+    },
+    arps => {
+      description => 'List of arp tuples (MAC, IP, DNS?). IPs will be resolved to FQDN by Netdisco.',
+      default => '[]',
+      schema => {
+        type => 'array',
+        items => {
+          type => 'object',
+          properties => {
+            mac => {
+              type => 'string',
+              required => 1,
+            },
+            ip => {
+              type => 'string',
+              required => 1,
+            },
+            dns => {
+              type => 'string',
+              required => 0,
+            }
+          }
+        }
+      },
+      in => 'body',
+    },
+  ],
+  responses => { default => {} },
+}, put '/api/v1/object/device/:ip/arps' => require_role api_admin => sub {
+
+  jq_insert([{
+    action => 'arpnip',
+    device => params->{ip},
+    subaction => request->body,
+    username => session('logged_in_user'),
+    userip => request->remote_address,
+  }]);
+
+  return to_json {};
+};
+
 true;
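Equivalent sketch for the arps endpoint above, under the same assumptions
(api_admin role required, placeholder host and token); the request body is
queued as an offline arpnip job for the device:

 curl -X PUT \
   -H 'Authorization: <api-token>' -H 'Content-Type: application/json' \
   --data-binary @/tmp/arp-table.json \
   'https://netdisco.example.com/api/v1/object/device/192.0.2.1/arps'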
@@ -96,7 +96,7 @@ sub get_user_roles {
   my $role_column = $settings->{role_column} || 'role';

   return [ try {
-    $user->$roles->search({}, { bind => [setting('api_token_lifetime')] })
+    $user->$roles->search({}, { bind => [setting('api_token_lifetime'), setting('api_token_lifetime')] })
          ->get_column( $role_column )->all;
   } ];
 }
@@ -14,7 +14,8 @@ has [qw/workers_check
         workers_main
         workers_user
         workers_store
-        workers_late/] => ( is => 'rw' );
+        workers_late
+        transport_required/] => ( is => 'rw' );

 sub load_workers {
   my $self = shift;
@@ -37,6 +38,7 @@ sub load_workers {

   # now vars->{workers} is populated, we set the dispatch order
   my $workers = vars->{'workers'}->{$action} || {};
+  my $driverless_main = 0;
   #use DDP; p vars->{'workers'};

   foreach my $phase (qw/check early main user store late/) {
@@ -46,12 +48,17 @@ sub load_workers {
     foreach my $namespace (sort keys %{ $workers->{$phase} }) {
       foreach my $priority (sort {$b <=> $a}
         keys %{ $workers->{$phase}->{$namespace} }) {
+
+          ++$driverless_main if $phase eq 'main'
+            and ($priority == 0 or $priority == setting('driver_priority')->{'direct'});
           push @wset, @{ $workers->{$phase}->{$namespace}->{$priority} };
       }
     }

     $self->$pname( \@wset );
   }
+
+  $self->transport_required( $driverless_main ? false : true );
 }

 true;
@@ -24,6 +24,7 @@ register 'register_worker' => sub {
   return error "failed to parse action in '$package'"
     unless $workerconf->{action};

+  ( $workerconf->{title} ||= lc($package) ) =~ s/.+plugin:://;
   $workerconf->{phase} ||= 'user';
   $workerconf->{namespace} ||= '_base_';
   $workerconf->{priority} ||= (exists $workerconf->{driver}
@@ -33,10 +34,20 @@ register 'register_worker' => sub {
     my $job = shift or die 'missing job param';
     # use DDP; p $workerconf;

-    debug sprintf '-> run worker %s/%s/%s',
-      @$workerconf{qw/phase namespace priority/};
+    debug sprintf '-> run worker %s/%s "%s"',
+      @$workerconf{qw/phase priority title/};

-    return if $job->is_cancelled;
+    if ($job->is_cancelled) {
+      return $job->add_status( Status->info('skip: job is cancelled') );
+    }
+
+    if ($job->is_offline
+        and $workerconf->{phase} eq 'main'
+        and $workerconf->{priority} > 0
+        and $workerconf->{priority} < setting('driver_priority')->{'direct'}) {
+
+      return $job->add_status( Status->info('skip: networked worker but job is running offline') );
+    }

     # check to see if this namespace has already passed at higher priority
     # and also update job's record of namespace and priority
@@ -79,7 +90,9 @@ register 'register_worker' => sub {

       # per-device action but no device creds available
       return $job->add_status( Status->info('skip: driver or action not applicable') )
-        if 0 == scalar @newuserconf && $job->action ne "delete";
+        if 0 == scalar @newuserconf
+          and $workerconf->{priority} > 0
+          and $workerconf->{priority} < setting('driver_priority')->{'direct'};
     }

     # back up and restore device_auth
@@ -16,8 +16,14 @@ register_worker({ phase => 'check' }, sub {
   return Status->error("arpnip skipped: $device not yet discovered")
     unless $device->in_storage;

-  return Status->info("arpnip skipped: $device is not arpnipable")
-    unless is_arpnipable_now($device);
+  if ($job->port or $job->extra) {
+    $job->is_offline(true);
+    debug 'arpnip offline: will update from CLI or API';
+  }
+  else {
+    return Status->info("arpnip skipped: $device is not arpnipable")
+      unless is_arpnipable_now($device);
+  }

   # support for Hooks
   vars->{'hook_data'} = { $device->get_columns };
@@ -12,29 +12,60 @@ use App::Netdisco::Transport::SNMP ();
 use App::Netdisco::Util::Node qw/check_mac store_arp/;
 use App::Netdisco::Util::FastResolver 'hostnames_resolve_async';

+use File::Slurper 'read_text';
 use NetAddr::IP::Lite ':lower';
+use Regexp::Common 'net';
+use NetAddr::MAC ();
 use Time::HiRes 'gettimeofday';

+register_worker({ phase => 'early',
+  title => 'prepare common data' }, sub {
+
+  my ($job, $workerconf) = @_;
+  my $device = $job->device;
+
+  # would be possible just to use now() on updated records, but by using this
+  # same value for them all, we can if we want add a job at the end to
+  # select and do something with the updated set (see set archive, below)
+  vars->{'timestamp'} = ($job->is_offline and $job->entered)
+    ? (schema('netdisco')->storage->dbh->quote($job->entered) .'::timestamp')
+    : 'to_timestamp('. (join '.', gettimeofday) .')';
+
+  # initialise the cache
+  vars->{'arps'} ||= [];
+});
+
 register_worker({ phase => 'store' }, sub {
   my ($job, $workerconf) = @_;
   my $device = $job->device;

-  # would be possible just to use now() on updated records, but by using this
-  # same value for them all, we _can_ if we want add a job at the end to
-  # select and do something with the updated set (no reason to yet, though)
-  my $now = 'to_timestamp('. (join '.', gettimeofday) .')';
+  vars->{'arps'} = [ grep { check_mac(($_->{mac} || $_->{node}), $device) }
+                          @{ vars->{'arps'} } ];

-  # update node_ip with ARP and Neighbor Cache entries
+  debug sprintf ' resolving %d ARP entries with max %d outstanding requests',
+    scalar @{ vars->{'arps'} }, $ENV{'PERL_ANYEVENT_MAX_OUTSTANDING_DNS'};
+  vars->{'arps'} = hostnames_resolve_async( vars->{'arps'} );
+
+  my ($v4, $v6) = (0, 0);
+  foreach my $a_entry (@{ vars->{'arps'} }) {
+    my $a_ip = NetAddr::IP::Lite->new($a_entry->{ip});
+
+    if ($a_ip) {
+      ++$v4 if $a_ip->bits == 32;;
+      ++$v6 if $a_ip->bits == 128;;
+    }
+  }
+
+  my $now = vars->{'timestamp'};
+  store_arp(\%$_, $now) for @{ vars->{'arps'} };

-  store_arp(\%$_, $now) for @{ vars->{'v4arps'} };
   debug sprintf ' [%s] arpnip - processed %s ARP Cache entries',
-    $device->ip, scalar @{ vars->{'v4arps'} };
+    $device->ip, $v4;

-  store_arp(\%$_, $now) for @{ vars->{'v6arps'} };
   debug sprintf ' [%s] arpnip - processed %s IPv6 Neighbor Cache entries',
-    $device->ip, scalar @{ vars->{'v6arps'} };
+    $device->ip, $v6;

   $device->update({last_arpnip => \$now});
+  $device->update({layers => \[q{overlay(layers placing '1' from 6 for 1)}]});

   my $status = $job->best_status;
   return Status->$status("Ended arpnip for $device");
@@ -48,14 +79,13 @@ register_worker({ phase => 'main', driver => 'snmp' }, sub {
     or return Status->defer("arpnip failed: could not SNMP connect to $device");

   # cache v4 arp table
-  push @{ vars->{'v4arps'} },
-    @{ get_arps_snmp($device, $snmp->at_paddr, $snmp->at_netaddr) };
+  push @{ vars->{'arps'} },
+    get_arps_snmp($device, $snmp->at_paddr, $snmp->at_netaddr);

   # cache v6 neighbor cache
-  push @{ vars->{'v6arps'} },
-    @{get_arps_snmp($device, $snmp->ipv6_n2p_mac, $snmp->ipv6_n2p_addr) };
+  push @{ vars->{'arps'} },
+    get_arps_snmp($device, $snmp->ipv6_n2p_mac, $snmp->ipv6_n2p_addr);

-  $device->update({layers => \[q{overlay(layers placing '1' from 6 for 1)}]});
-
   return Status->done("Gathered arp caches from $device");
 });
@@ -65,21 +95,15 @@ sub get_arps_snmp {
   my @arps = ();

   while (my ($arp, $node) = each %$paddr) {
-    my $ip = $netaddr->{$arp};
-    next unless defined $ip;
-    next unless check_mac($node, $device);
+    my $ip = $netaddr->{$arp} or next;
     push @arps, {
-      node => $node,
+      mac => $node,
       ip => $ip,
       dns => undef,
     };
   }

-  debug sprintf ' resolving %d ARP entries with max %d outstanding requests',
-    scalar @arps, $ENV{'PERL_ANYEVENT_MAX_OUTSTANDING_DNS'};
-  my $resolved_ips = hostnames_resolve_async(\@arps);
-
-  return $resolved_ips;
+  return @arps;
 }

 register_worker({ phase => 'main', driver => 'cli' }, sub {
@@ -89,51 +113,51 @@ register_worker({ phase => 'main', driver => 'cli' }, sub {
   my $cli = App::Netdisco::Transport::SSH->session_for($device)
     or return Status->defer("arpnip failed: could not SSH connect to $device");

-  my $a_entry;
-  my $a_ip;
-  my $a_mac;
-
   # should be both v4 and v6
-  my @arps = @{ get_arps_cli($device, [$cli->arpnip]) };
+  vars->{'arps'} = [ $cli->arpnip ];

-  foreach $a_entry (@arps) {
-    $a_ip = NetAddr::IP::Lite->new($a_entry->{ip});
-
-    if (defined($a_ip)) {
-      # IPv4
-      if ($a_ip->bits == 32 ) {
-        push @{ vars->{"v4arps"} }, $a_entry;
-      }
-      # IPv6
-      if ($a_ip->bits == 128 ) {
-        push @{ vars->{"v6arps"} }, $a_entry;
-      }
-    }
-  }
-
-  $device->update({layers => \[q{overlay(layers placing '1' from 6 for 1)}]});
-
   return Status->done("Gathered arp caches from $device");
 });

-sub get_arps_cli {
-  my ($device, $entries) = @_;
-  my @arps = ();
-  $entries ||= [];
+register_worker({ phase => 'main', driver => 'direct' }, sub {
+  my ($job, $workerconf) = @_;
+  my $device = $job->device;

-  foreach my $entry (@$entries) {
-    next unless check_mac($entry->{mac}, $device);
-    push @arps, {
-      node => $entry->{mac},
-      ip => $entry->{ip},
-      dns => $entry->{dns},
-    };
+  return Status->info('skip: arp table data supplied by other source')
+    unless $job->is_offline;
+
+  # load cache from file or copy from job param
+  my $data = $job->extra;
+
+  if ($job->port) {
+    return $job->cancel(sprintf 'could not open data source "%s"', $job->port)
+      unless -f $job->port;
+
+    $data = read_text($job->port)
+      or return $job->cancel(sprintf 'problem reading from file "%s"', $job->port);
   }

-  debug sprintf ' resolving %d ARP entries with max %d outstanding requests',
-    scalar @arps, $ENV{'PERL_ANYEVENT_MAX_OUTSTANDING_DNS'};
-  my $resolved_ips = hostnames_resolve_async(\@arps);
-
-  return $resolved_ips;
-}
+  my @arps = (length $data ? @{ from_json($data) } : ());
+
+  return $job->cancel('data provided but 0 arp entries found')
+    unless scalar @arps;
+
+  debug sprintf ' [%s] arpnip - %s arp table entries provided',
+    $device->ip, scalar @arps;
+
+  # sanity check
+  foreach my $a_entry (@arps) {
+    my $ip = NetAddr::IP::Lite->new($a_entry->{'ip'} || '');
+    my $mac = NetAddr::MAC->new(mac => ($a_entry->{'mac'} || ''));
+
+    next unless $ip and $mac;
+    next if (($ip->addr eq '0.0.0.0') or ($ip !~ m{^(?:$RE{net}{IPv4}|$RE{net}{IPv6})(?:/\d+)?$}i));
+    next if (($mac->as_ieee eq '00:00:00:00:00:00') or ($mac->as_ieee !~ m{^$RE{net}{MAC}$}i));
+
+    push @{ vars->{'arps'} }, $a_entry;
+  }
+
+  return Status->done("Received arp cache for $device");
+});

 true;
@@ -54,7 +54,7 @@ register_worker({ phase => 'early', driver => 'snmp' }, sub {
   $device->set_column( contact => Encode::decode('UTF-8', $snmp->contact) );
   $device->set_column( location => Encode::decode('UTF-8', $snmp->location) );

-  $device->set_column( num_ports => $snmp->ports );
+  $device->set_column( num_ports => ($snmp->ports || 0) );
   $device->set_column( snmp_class => $snmp->class );
   $device->set_column( snmp_engineid => unpack('H*', ($snmp->snmpEngineID || '')) );

@@ -16,8 +16,14 @@ register_worker({ phase => 'check' }, sub {
   return Status->error("macsuck skipped: $device not yet discovered")
     unless $device->in_storage;

-  return Status->info("macsuck skipped: $device is not macsuckable")
-    unless is_macsuckable_now($device);
+  if ($job->port or $job->extra) {
+    $job->is_offline(true);
+    debug 'macsuck offline: will update from CLI or API';
+  }
+  else {
+    return Status->info("macsuck skipped: $device is not macsuckable")
+      unless is_macsuckable_now($device);
+  }

   # support for Hooks
   vars->{'hook_data'} = { $device->get_columns };
@@ -10,85 +10,189 @@ use App::Netdisco::Util::PortMAC 'get_port_macs';
 use App::Netdisco::Util::Device 'match_to_setting';
 use App::Netdisco::Util::Node 'check_mac';
 use App::Netdisco::Util::SNMP 'snmp_comm_reindex';

 use Dancer::Plugin::DBIC 'schema';
 use Time::HiRes 'gettimeofday';
+use File::Slurper 'read_text';
 use Scope::Guard 'guard';
+use Regexp::Common 'net';
+use NetAddr::MAC ();
+use List::MoreUtils ();

-register_worker({ phase => 'main', driver => 'snmp' }, sub {
+register_worker({ phase => 'early',
+  title => 'prepare common data' }, sub {
+
   my ($job, $workerconf) = @_;
-
   my $device = $job->device;
-  my $snmp = App::Netdisco::Transport::SNMP->reader_for($device)
-    or return Status->defer("macsuck failed: could not SNMP connect to $device");

   # would be possible just to use now() on updated records, but by using this
   # same value for them all, we can if we want add a job at the end to
   # select and do something with the updated set (see set archive, below)
-  my $now = 'to_timestamp('. (join '.', gettimeofday) .')';
-  my $total_nodes = 0;
+  vars->{'timestamp'} = ($job->is_offline and $job->entered)
+    ? (schema('netdisco')->storage->dbh->quote($job->entered) .'::timestamp')
+    : 'to_timestamp('. (join '.', gettimeofday) .')';
+
+  # initialise the cache
+  vars->{'fwtable'} ||= {};

   # cache the device ports to save hitting the database for many single rows
-  my $device_ports = {map {($_->port => $_)}
+  vars->{'device_ports'} = {map {($_->port => $_)}
     $device->ports(undef, {prefetch => {neighbor_alias => 'device'}})->all};
+});
+
+register_worker({ phase => 'main', driver => 'direct',
+  title => 'gather macs from file and set interfaces' }, sub {
+
+  my ($job, $workerconf) = @_;
+  my $device = $job->device;
+
+  return Status->info('skip: fwtable data supplied by other source')
+    unless $job->is_offline;
+
+  # load cache from file or copy from job param
+  my $data = $job->extra;
+
+  if ($job->port) {
+    return $job->cancel(sprintf 'could not open data source "%s"', $job->port)
+      unless -f $job->port;
+
+    $data = read_text($job->port)
+      or return $job->cancel(sprintf 'problem reading from file "%s"', $job->port);
+  }
+
+  my @fwtable = (length $data ? @{ from_json($data) } : ());
+
+  return $job->cancel('data provided but 0 fwd entries found')
+    unless scalar @fwtable;
+
+  debug sprintf ' [%s] macsuck - %s forwarding table entries provided',
+    $device->ip, scalar @fwtable;
+
+  # make sure ports are UP in netdisco (unless it's a lag master,
+  # because we can still see nodes without a functioning aggregate)
+
+  my %port_seen = ();
+  foreach my $node (@fwtable) {
+    my $port = $node->{port} or next;
+    next if $port_seen{$port};
+    next unless exists vars->{'device_ports'}->{$port};
+    next if vars->{'device_ports'}->{$port}->is_master;
+
+    debug sprintf ' [%s] macsuck - updating port %s status up/up due to node presence',
+      $device->ip, $port;
+
+    vars->{'device_ports'}->{$port}->update({
+      up => 'up',
+      up_admin => 'up',
+    });
+
+    ++$port_seen{$port};
+  }
+
+  # rebuild fwtable in format for filtering more easily
+  foreach my $node (@fwtable) {
+    my $mac = NetAddr::MAC->new(mac => ($node->{'mac'} || ''));
+    next unless $node->{'port'} and $mac;
+    next if (($mac->as_ieee eq '00:00:00:00:00:00') or ($mac->as_ieee !~ m{^$RE{net}{MAC}$}));
+
+    vars->{'fwtable'}->{ $node->{'vlan'} || 0 }
+                    ->{ $node->{'port'} }
+                    ->{ $mac->as_ieee } += 1;
+  }
+
+  # remove macs on forbidden vlans
+  my @vlans = (0, sanity_vlans($device, vars->{'fwtable'}, {}, {}));
+  foreach my $vlan (keys %{ vars->{'fwtable'} }) {
+    delete vars->{'fwtable'}->{$vlan}
+      unless scalar grep {$_ eq $vlan} @vlans;
+  }
+
+  return Status->done("Received MAC addresses for $device");
+});
+
+
+register_worker({ phase => 'main', driver => 'snmp',
+  title => 'gather macs from snmp and set interfaces'}, sub {
+
+  my ($job, $workerconf) = @_;
+  my $device = $job->device;
+
+  my $snmp = App::Netdisco::Transport::SNMP->reader_for($device)
+    or return Status->defer("macsuck failed: could not SNMP connect to $device");

   my $interfaces = $snmp->interfaces;
   my $reverse_interfaces = { reverse %{ $interfaces } }; # might squash but prob not
   my $i_up = $snmp->i_up;
   my $i_up_admin = $snmp->i_up_admin;

+  # make sure ports reflect their latest state as reported by device
+  foreach my $port (keys %{ vars->{'device_ports'} }) {
+    my $iid = $reverse_interfaces->{$port} or next;
+
+    debug sprintf ' [%s] macsuck - updating port %s status : %s/%s',
+      $device->ip, $port, ($i_up_admin->{$iid} || '-'), ($i_up->{$iid} || '-');
+
+    vars->{'device_ports'}->{$port}->update({
+      up => $i_up->{$iid},
+      up_admin => $i_up_admin->{$iid},
+    });
+  }
+
   # get forwarding table data via basic snmp connection
-  my $fwtable = walk_fwtable($device, $interfaces, $device_ports);
+  vars->{'fwtable'} = walk_fwtable($snmp, $device, $interfaces);

   # ...then per-vlan if supported
-  my @vlan_list = get_vlan_list($device);
+  my @vlan_list = get_vlan_list($snmp, $device);
   {
     my $guard = guard { snmp_comm_reindex($snmp, $device, 0) };
     foreach my $vlan (@vlan_list) {
       snmp_comm_reindex($snmp, $device, $vlan);
       my $pv_fwtable =
-        walk_fwtable($device, $interfaces, $device_ports, $vlan);
-      $fwtable = {%$fwtable, %$pv_fwtable};
+        walk_fwtable($snmp, $device, $interfaces, $vlan);
+      vars->{'fwtable'} = {%{ vars->{'fwtable'} }, %$pv_fwtable};
     }
   }

-  # now it's time to call store_node for every node discovered
-  # on every port on every vlan on this device.
+  return Status->done("Gathered MAC addresses for $device");
+});
+
+
+register_worker({ phase => 'store',
+  title => 'save macs to database'}, sub {
+
+  my ($job, $workerconf) = @_;
+  my $device = $job->device;
+
+  # sanity filter the MAC addresses from the device
+
+  vars->{'fwtable'} = sanity_macs( $device, vars->{'fwtable'}, vars->{'device_ports'} );

   # reverse sort allows vlan 0 entries to be included only as fallback
-  foreach my $vlan (reverse sort keys %$fwtable) {
-    foreach my $port (keys %{ $fwtable->{$vlan} }) {
+  my $node_count = 0;
+  foreach my $vlan (reverse sort keys %{ vars->{'fwtable'} }) {
+    foreach my $port (keys %{ vars->{'fwtable'}->{$vlan} }) {
       my $vlabel = ($vlan ? $vlan : 'unknown');
       debug sprintf ' [%s] macsuck - port %s vlan %s : %s nodes',
-        $device->ip, $port, $vlabel, scalar keys %{ $fwtable->{$vlan}->{$port} };
+        $device->ip, $port, $vlabel, scalar keys %{ vars->{'fwtable'}->{$vlan}->{$port} };

-      # make sure this port is UP in netdisco (unless it's a lag master,
-      # because we can still see nodes without a functioning aggregate)
-      my $iid = $reverse_interfaces->{$port};
-      if ($iid and not $device_ports->{$port}->is_master) {
-        debug sprintf ' [%s] macsuck - updating port %s status : %s/%s',
-          $device->ip, $port, ($i_up_admin->{$iid} || '-'), ($i_up->{$iid} || '-');
-        $device_ports->{$port}->update({
-          up => $i_up->{$iid},
-          up_admin => $i_up_admin->{$iid},
-        });
-      }
-
-      foreach my $mac (keys %{ $fwtable->{$vlan}->{$port} }) {
+      foreach my $mac (keys %{ vars->{'fwtable'}->{$vlan}->{$port} }) {

         # remove vlan 0 entry for this MAC addr
-        delete $fwtable->{0}->{$_}->{$mac}
-          for keys %{ $fwtable->{0} };
+        delete vars->{'fwtable'}->{0}->{$_}->{$mac}
+          for keys %{ vars->{'fwtable'}->{0} };

-        ++$total_nodes;
-        store_node($device->ip, $vlan, $port, $mac, $now);
+        store_node($device->ip, $vlan, $port, $mac, vars->{'timestamp'});
+        ++$node_count;
       }
     }
   }

-  debug sprintf ' [%s] macsuck - %s updated forwarding table entries',
-    $device->ip, $total_nodes;
+  debug sprintf ' [%s] macsuck - stored %s forwarding table entries',
+    $device->ip, $node_count;

   # a use for $now ... need to archive disappeared nodes
+  my $now = vars->{'timestamp'};
   my $archived = 0;

   if (setting('node_freshness')) {
@@ -103,9 +207,14 @@ register_worker({ phase => 'main', driver => 'snmp' }, sub {
     $device->ip, $archived;

   $device->update({last_macsuck => \$now});
-  return Status->done("Ended macsuck for $device");
+
+  my $status = $job->best_status;
+  return Status->$status("Ended macsuck for $device");
 });

+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
 =head2 store_node( $ip, $vlan, $port, $mac, $now? )

 Writes a fresh entry to the Netdisco C<node> database table. Will mark old
@@ -160,11 +269,7 @@ sub store_node {

 # return a list of vlan numbers which are OK to macsuck on this device
 sub get_vlan_list {
-  my $device = shift;
-
-  my $snmp = App::Netdisco::Transport::SNMP->reader_for($device)
-    or return (); # already checked!
-
+  my ($snmp, $device) = @_;
   return () unless $snmp->cisco_comm_indexing;

   my (%vlans, %vlan_names, %vlan_states);
@@ -227,10 +332,16 @@ sub get_vlan_list {
     $vlan_states{$vlan} = $state;
   }

+  return sanity_vlans($device, \%vlans, \%vlan_names, \%vlan_states);
+}
+
+sub sanity_vlans {
+  my ($device, $vlans, $vlan_names, $vlan_states) = @_;
+
   my @ok_vlans = ();
-  foreach my $vlan (sort keys %vlans) {
-    my $name = $vlan_names{$vlan} || '(unnamed)';
-    my $state = $vlan_states{$vlan} || '(unknown)';
+  foreach my $vlan (sort keys %$vlans) {
+    my $name = $vlan_names->{$vlan} || '(unnamed)';
+    my $state = $vlan_states->{$vlan} || '(unknown)';

     if (ref [] eq ref setting('macsuck_no_vlan')) {
       my $ignore = setting('macsuck_no_vlan');
@@ -274,7 +385,7 @@ sub get_vlan_list {
     next if $vlan == 0; # quietly skip

     # check in use by a port on this device
-    if (!$vlans{$vlan} && !setting('macsuck_all_vlans')) {
+    if (not $vlans->{$vlan} and not setting('macsuck_all_vlans')) {
       debug sprintf
         ' [%s] macsuck VLAN %s/%s - not in use by any port - skipping.',
         $device->ip, $vlan, $name;
@@ -298,50 +409,20 @@ sub get_vlan_list {
 # walks the forwarding table (BRIDGE-MIB) for the device and returns a
 # table of node entries.
 sub walk_fwtable {
-  my ($device, $interfaces, $device_ports, $comm_vlan) = @_;
-  my $skiplist = {}; # ports through which we can see another device
+  my ($snmp, $device, $interfaces, $comm_vlan) = @_;
   my $cache = {};

-  my $ignorelist = {}; # ports suppressed by macsuck_no_deviceports
-  if (scalar @{ setting('macsuck_no_deviceports') }) {
-    my @ignoremaps = @{ setting('macsuck_no_deviceports') };
-
-    foreach my $map (@ignoremaps) {
-      next unless ref {} eq ref $map;
-
-      foreach my $key (sort keys %$map) {
-        # lhs matches device, rhs matches port
-        next unless check_acl_only($device, $key);
-
-        foreach my $port (keys %$device_ports) {
-          next unless check_acl_only($device_ports->{$port}, $map->{$key});
-
-          ++$ignorelist->{$port};
-          debug sprintf ' [%s] macsuck %s - port suppressed by macsuck_no_deviceports',
-            $device->ip, $port;
-        }
-      }
-    }
-  }
-
-  my $snmp = App::Netdisco::Transport::SNMP->reader_for($device)
-    or return $cache; # already checked!
-
   my $fw_mac = $snmp->fw_mac || {};
   my $fw_port = $snmp->fw_port || {};
-  my $fw_vlan = ($snmp->can('cisco_comm_indexing') && $snmp->cisco_comm_indexing())
+  my $fw_vlan = ($snmp->can('cisco_comm_indexing') and $snmp->cisco_comm_indexing())
     ? {} : $snmp->qb_fw_vlan;
   my $bp_index = $snmp->bp_index || {};

-  my @fw_mac_list = values %$fw_mac;
-  my $port_macs = get_port_macs(\@fw_mac_list);
-
   # to map forwarding table port to device port we have
   # fw_port -> bp_index -> interfaces

   MAC: while (my ($idx, $mac) = each %$fw_mac) {
     my $bp_id = $fw_port->{$idx};
-    next MAC unless check_mac($mac, $device);

     unless (defined $bp_id) {
       debug sprintf
@@ -371,136 +452,205 @@ sub walk_fwtable {
|
|||||||
next MAC;
|
next MAC;
|
||||||
}
|
}
|
||||||
|
|
||||||
# this uses the cached $ports resultset to limit hits on the db
|
|
||||||
my $device_port = $device_ports->{$port};
|
|
||||||
|
|
||||||
if (exists $ignorelist->{$port}) {
|
|
||||||
# stash in the skiplist so that node search works for neighbor
|
|
||||||
# (besides this, skiplist is not used for ignorelist ports)
|
|
||||||
$skiplist->{$port} = [ $vlan, $mac ] if exists $port_macs->{$mac};
|
|
||||||
|
|
||||||
debug sprintf
|
|
||||||
' [%s] macsuck %s - port %s is suppressed by config - skipping.',
|
|
||||||
$device->ip, $mac, $port;
|
|
||||||
next MAC;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (exists $skiplist->{$port}) {
|
|
||||||
debug sprintf
|
|
||||||
' [%s] macsuck %s - seen another device thru port %s - skipping.',
|
|
||||||
$device->ip, $mac, $port;
|
|
||||||
next MAC;
|
|
||||||
}
|
|
||||||
|
|
||||||
# WRT #475 ... see? :-)
|
|
||||||
unless (defined $device_port) {
|
|
||||||
debug sprintf
|
|
||||||
' [%s] macsuck %s - port %s is not in database - skipping.',
|
|
||||||
$device->ip, $mac, $port;
|
|
||||||
next MAC;
|
|
||||||
}
|
|
||||||
|
|
||||||
# check to see if the port is connected to another device
|
|
||||||
# and if we have that device in the database.
|
|
||||||
|
|
||||||
# carefully be aware: "uplink" here means "connected to another device"
|
|
||||||
# it does _not_ mean that the user wants nodes gathered on the remote dev.
|
|
||||||
|
|
||||||
# we have two ways to detect "uplink" port status:
|
|
||||||
# * a neighbor was discovered using CDP/LLDP
|
|
||||||
# * a mac addr is seen which belongs to any device port/interface
|
|
||||||
|
|
||||||
# allow to gather MACs on upstream port for some kinds of device that
|
|
||||||
# do not expose MAC address tables via SNMP. relies on prefetched
|
|
||||||
# neighbors otherwise it would kill the DB with device lookups.
|
|
||||||
|
|
||||||
my $neigh_cannot_macsuck = eval { # can fail
|
|
||||||
check_acl_no(($device_port->neighbor || "0 but true"), 'macsuck_unsupported') ||
|
|
||||||
match_to_setting($device_port->remote_type, 'macsuck_unsupported_type') };
|
|
||||||
|
|
||||||
# here, is_uplink comes from Discover::Neighbors finding LLDP remnants
|
|
||||||
if ($device_port->is_uplink) {
|
|
||||||
if ($neigh_cannot_macsuck) {
|
|
||||||
debug sprintf
|
|
||||||
' [%s] macsuck %s - port %s neighbor %s without macsuck support',
|
|
||||||
$device->ip, $mac, $port,
|
|
||||||
(eval { $device_port->neighbor->ip }
|
|
||||||
|| ($device_port->remote_ip
|
|
||||||
|| $device_port->remote_id || '?'));
|
|
||||||
# continue!!
|
|
||||||
}
|
|
||||||
elsif (my $neighbor = $device_port->neighbor) {
|
|
||||||
debug sprintf
|
|
||||||
' [%s] macsuck %s - port %s has neighbor %s - skipping.',
|
|
||||||
$device->ip, $mac, $port, $neighbor->ip;
|
|
||||||
next MAC;
|
|
||||||
}
|
|
||||||
elsif (my $remote = $device_port->remote_ip) {
|
|
||||||
debug sprintf
|
|
||||||
' [%s] macsuck %s - port %s has undiscovered neighbor %s',
|
|
||||||
$device->ip, $mac, $port, $remote;
|
|
||||||
# continue!!
|
|
||||||
}
|
|
||||||
elsif (not setting('macsuck_bleed')) {
|
|
||||||
debug sprintf
|
|
||||||
' [%s] macsuck %s - port %s is detected uplink - skipping.',
|
|
||||||
$device->ip, $mac, $port;
|
|
||||||
|
|
||||||
$skiplist->{$port} = [ $vlan, $mac ] # remember neighbor port mac
|
|
||||||
if exists $port_macs->{$mac};
|
|
||||||
next MAC;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# here, the MAC is known as belonging to a device switchport
|
|
||||||
if (exists $port_macs->{$mac}) {
|
|
||||||
my $switch_ip = $port_macs->{$mac};
|
|
||||||
if ($device->ip eq $switch_ip) {
|
|
||||||
debug sprintf
|
|
||||||
' [%s] macsuck %s - port %s connects to self - skipping.',
|
|
||||||
$device->ip, $mac, $port;
|
|
||||||
next MAC;
|
|
||||||
}
|
|
||||||
|
|
||||||
debug sprintf ' [%s] macsuck %s - port %s is probably an uplink',
|
|
||||||
$device->ip, $mac, $port;
|
|
||||||
$device_port->update({is_uplink => \'true'});
|
|
||||||
|
|
||||||
# neighbor exists and Netdisco can speak to it, so we don't want
|
|
||||||
# its MAC address. however don't add to skiplist as that would
|
|
||||||
# clear all other MACs on the port.
|
|
||||||
next MAC if $neigh_cannot_macsuck;
|
|
||||||
|
|
||||||
# when there's no CDP/LLDP, we only want to gather macs at the
|
|
||||||
# topology edge, hence skip ports with known device macs.
|
|
||||||
if (not setting('macsuck_bleed')) {
|
|
||||||
debug sprintf ' [%s] macsuck %s - adding port %s to skiplist',
|
|
||||||
$device->ip, $mac, $port;
|
|
||||||
|
|
||||||
$skiplist->{$port} = [ $vlan, $mac ]; # remember for later
|
|
||||||
next MAC;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# possibly move node to lag master
|
|
||||||
if (defined $device_port->slave_of
|
|
||||||
and exists $device_ports->{$device_port->slave_of}) {
|
|
||||||
my $parent = $device_port->slave_of;
|
|
||||||
$device_ports->{$parent}->update({is_uplink => \'true'});
|
|
||||||
|
|
||||||
# VLAN subinterfaces can be set uplink,
|
|
||||||
# but we don't want to move nodes there (so check is_master).
|
|
||||||
$port = $parent if $device_ports->{$parent}->is_master;
|
|
||||||
}
|
|
||||||
|
|
||||||
++$cache->{$vlan}->{$port}->{$mac};
|
++$cache->{$vlan}->{$port}->{$mac};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return $cache;
|
||||||
|
}
|
||||||
|
|
||||||
|
sub sanity_macs {
|
||||||
|
my ($device, $cache, $device_ports) = @_;
|
||||||
|
|
||||||
|
# note any of the MACs which are actually device or device_port MACs
|
||||||
|
# used to spot uplink ports (neighborport)
|
||||||
|
my @fw_mac_list = ();
|
||||||
|
foreach my $vlan (keys %{ $cache }) {
|
||||||
|
foreach my $port (keys %{ $cache->{$vlan} }) {
|
||||||
|
push @fw_mac_list, keys %{ $cache->{$vlan}->{$port} };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@fw_mac_list = List::MoreUtils::uniq( @fw_mac_list );
|
||||||
|
my $port_macs = get_port_macs(\@fw_mac_list);
|
||||||
|
|
||||||
|
my $neighborport = {}; # ports through which we can see another device
|
||||||
|
my $ignoreport = {}; # ports suppressed by macsuck_no_deviceports
|
||||||
|
|
||||||
|
if (scalar @{ setting('macsuck_no_deviceports') }) {
|
||||||
|
my @ignoremaps = @{ setting('macsuck_no_deviceports') };
|
||||||
|
|
||||||
|
foreach my $map (@ignoremaps) {
|
||||||
|
next unless ref {} eq ref $map;
|
||||||
|
|
||||||
|
foreach my $key (sort keys %$map) {
|
||||||
|
# lhs matches device, rhs matches port
|
||||||
|
next unless check_acl_only($device, $key);
|
||||||
|
|
||||||
|
foreach my $port (keys %{ $device_ports }) {
|
||||||
|
next unless check_acl_only($device_ports->{$port}, $map->{$key});
|
||||||
|
|
||||||
|
++$ignoreport->{$port};
|
||||||
|
debug sprintf ' [%s] macsuck %s - port suppressed by macsuck_no_deviceports',
|
||||||
|
$device->ip, $port;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||

foreach my $vlan (keys %{ $cache }) {
  foreach my $port (keys %{ $cache->{$vlan} }) {
    MAC: foreach my $mac (keys %{ $cache->{$vlan}->{$port} }) {

      unless (check_mac($mac, $device)) {
        delete $cache->{$vlan}->{$port}->{$mac};
        next MAC;
      }

      # this uses the cached $ports resultset to limit hits on the db
      my $device_port = $device_ports->{$port};

      # WRT #475 ... see? :-)
      unless (defined $device_port) {
        debug sprintf
          ' [%s] macsuck %s - port %s is not in database - skipping.',
          $device->ip, $mac, $port;
        delete $cache->{$vlan}->{$port}->{$mac};
        next MAC;
      }

      if (exists $ignoreport->{$port}) {
        # stash in the neighborport so that node search works for neighbor
        # (besides this, neighborport is not used for ignoreport ports)
        $neighborport->{$port} = [ $vlan, $mac ] if exists $port_macs->{$mac};

        debug sprintf
          ' [%s] macsuck %s - port %s is suppressed by config - skipping.',
          $device->ip, $mac, $port;
        delete $cache->{$vlan}->{$port}->{$mac};
        next MAC;
      }

      if (exists $neighborport->{$port}) {
        debug sprintf
          ' [%s] macsuck %s - seen another device thru port %s - skipping.',
          $device->ip, $mac, $port;
        delete $cache->{$vlan}->{$port}->{$mac};
        next MAC;
      }

      # check to see if the port is connected to another device
      # and if we have that device in the database.

      # carefully be aware: "uplink" here means "connected to another device"
      # it does _not_ mean that the user wants nodes gathered on the remote dev.

      # we have two ways to detect "uplink" port status:
      # * a neighbor was discovered using CDP/LLDP
      # * a mac addr is seen which belongs to any device port/interface

      # allow to gather MACs on upstream (local) port for some kinds
      # of device that do not expose MAC address tables via SNMP.
      # relies on prefetched neighbors otherwise it would kill the DB
      # with device lookups.

      my $neigh_cannot_macsuck = eval { # can fail
        check_acl_no(($device_port->neighbor || "0 but true"), 'macsuck_unsupported') ||
        match_to_setting($device_port->remote_type, 'macsuck_unsupported_type') };

      # here, is_uplink comes from Discover::Neighbors finding LLDP remnants
      if ($device_port->is_uplink) {
        if ($neigh_cannot_macsuck) {
          debug sprintf
            ' [%s] macsuck %s - port %s neighbor %s without macsuck support',
            $device->ip, $mac, $port,
            (eval { $device_port->neighbor->ip }
              || ($device_port->remote_ip
              || $device_port->remote_id || '?'));
          # continue!!
        }
        elsif (my $neighbor = $device_port->neighbor) {
          debug sprintf
            ' [%s] macsuck %s - port %s has neighbor %s - skipping.',
            $device->ip, $mac, $port, $neighbor->ip;
          delete $cache->{$vlan}->{$port}->{$mac};
          next MAC;
        }
        elsif (my $remote = $device_port->remote_ip) {
          debug sprintf
            ' [%s] macsuck %s - port %s has undiscovered neighbor %s',
            $device->ip, $mac, $port, $remote;
          # continue!!
        }
        elsif (not setting('macsuck_bleed')) {
          debug sprintf
            ' [%s] macsuck %s - port %s is detected uplink - skipping.',
            $device->ip, $mac, $port;

          $neighborport->{$port} = [ $vlan, $mac ] # remember neighbor port mac
            if exists $port_macs->{$mac};
          delete $cache->{$vlan}->{$port}->{$mac};
          next MAC;
        }
      }

      # here, the MAC is known as belonging to a device switchport
      if (exists $port_macs->{$mac}) {
        my $switch_ip = $port_macs->{$mac};
        if ($device->ip eq $switch_ip) {
          debug sprintf
            ' [%s] macsuck %s - port %s connects to self - skipping.',
            $device->ip, $mac, $port;
          delete $cache->{$vlan}->{$port}->{$mac};
          next MAC;
        }

        debug sprintf ' [%s] macsuck %s - port %s is probably an uplink',
          $device->ip, $mac, $port;
        $device_port->update({is_uplink => \'true'});

        if ($neigh_cannot_macsuck) {
          # neighbor exists and Netdisco can speak to it, so we don't want
          # its MAC address. however don't add to neighborport as that would
          # clear all other MACs on the port.
          delete $cache->{$vlan}->{$port}->{$mac};
          next MAC;
        }

        # when there's no CDP/LLDP, we only want to gather macs at the
        # topology edge, hence skip ports with known device macs.
        if (not setting('macsuck_bleed')) {
          debug sprintf ' [%s] macsuck %s - port %s is at topology edge',
            $device->ip, $mac, $port;

          $neighborport->{$port} = [ $vlan, $mac ]; # remember for later
          delete $cache->{$vlan}->{$port}->{$mac};
          next MAC;
        }
      }

      # possibly move node to lag master
      if (defined $device_port->slave_of
        and exists $device_ports->{$device_port->slave_of}) {

        my $parent = $device_port->slave_of;
        $device_ports->{$parent}->update({is_uplink => \'true'});

        # VLAN subinterfaces can be set uplink,
        # but we don't want to move nodes there (so check is_master).
        if ($device_ports->{$parent}->is_master) {
          delete $cache->{$vlan}->{$port}->{$mac};
          ++$cache->{$vlan}->{$parent}->{$mac};
        }
      }
    }
  }
}

 # restore MACs of neighbor devices.
 # this is when we have a "possible uplink" detected but we still want to
 # record the single MAC of the neighbor device so it works in Node search.
-foreach my $port (keys %$skiplist) {
-  my ($vlan, $mac) = @{ $skiplist->{$port} };
+foreach my $port (keys %$neighborport) {
+  my ($vlan, $mac) = @{ $neighborport->{$port} };
   delete $cache->{$_}->{$port} for keys %$cache; # nuke nodes on all VLANs
   ++$cache->{$vlan}->{$port}->{$mac};
 }
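
The loops above prune a working cache keyed vlan -> port -> mac before it is stored as node rows. A short sketch of that shape and of what the neighborport restore leaves behind (port names and MACs below are invented for illustration):

    # shape of the working cache pruned above: vlan -> port -> mac -> count
    my $cache = {
      10 => {
        'ge-0/0/1' => { '00:11:22:33:44:55' => 1 },   # edge node: kept
        'ge-0/0/2' => { '00:aa:bb:cc:dd:ee' => 1 },   # MAC of a neighbor switch
      },
    };

    # a port judged to be an uplink is remembered as [ vlan, mac ], and after
    # the restore loop only that single neighbor MAC remains on the port
    my $neighborport = { 'ge-0/0/2' => [ 10, '00:aa:bb:cc:dd:ee' ] };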

@@ -51,7 +51,7 @@ sub run {

   # per-device action but no device creds available
   return $job->add_status( Status->defer('deferred job with no device creds') )
-    if 0 == scalar @newuserconf && $job->action ne "delete";
+    if 0 == scalar @newuserconf && $self->transport_required;
 }

 # back up and restore device_auth
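
In the hunk above the defer is now skipped whenever the action's workers need no transport, not only for delete jobs. A minimal sketch of the kind of plugin this lets run without any device credentials (the package name is invented; TestSix further down in this change set exercises the same behaviour):

    package App::NetdiscoX::Worker::Plugin::LocalOnlyExample;   # hypothetical name

    use Dancer ':syntax';
    use App::Netdisco::Worker::Plugin;
    use aliased 'App::Netdisco::Worker::Status';

    # no driver declared, so no transport and therefore no credentials needed
    register_worker({ phase => 'main' }, sub {
      return Status->done('ran without connecting to the device');
    });

    true;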

@@ -83,6 +83,7 @@ sub run {
     }
   }
   else {
+    debug sprintf '%s: running with no timeout', $job->action;
     $runner->($self, $job);
   }
 }

@@ -96,9 +96,9 @@ A numeric constant for the status, to allow comparison.

 sub level {
   my $self = shift;
-  return (($self->status eq 'done') ? 4
-        : ($self->status eq 'defer') ? 3
-        : ($self->status eq 'error') ? 2
+  return (($self->status eq 'error') ? 4
+        : ($self->status eq 'done') ? 3
+        : ($self->status eq 'defer') ? 2
         : ($self->status eq 'info') ? 1 : 0);
 }
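
The reordering above ranks an error above done and defer, so an error is no longer hidden by a later done when statuses are compared. A small sketch of picking the highest-ranked status with level() (the list of Status objects here is invented):

    use aliased 'App::Netdisco::Worker::Status';

    my @statuses = (Status->done('ok'), Status->error('failed'), Status->info('fyi'));
    my ($highest) = sort { $b->level <=> $a->level } @statuses;   # now the error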

@@ -484,6 +484,7 @@ worker_plugins:
 extra_worker_plugins: []

 driver_priority:
+  direct: 1000000
   restconf: 500
   netconf: 400
   eapi: 300
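
The new direct entry gives transport-free workers the highest driver priority, so they are tried before the networked drivers listed above, and per this change set the networked workers are suppressed when a direct worker is active. A hypothetical worker relying on that ordering (inside a plugin with the usual Plugin/Status imports):

    # hypothetical plugin: with driver_priority 'direct' at 1000000 this worker
    # is attempted before any networked driver for the same action
    register_worker({ phase => 'main', driver => 'direct' }, sub {
      return Status->done('used locally supplied data, no device session opened');
    });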

@@ -19,7 +19,7 @@ use Dancer qw/:moose :script !pass/;
 # configure logging to force console output
 my $CONFIG = config();
 $CONFIG->{logger} = 'console';
-$CONFIG->{log} = 'error';
+$CONFIG->{log} = ($ENV{'DANCER_DEBUG'} ? 'debug' : 'error');
 Dancer::Logger->init('console', $CONFIG);

 {
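
With the change above, the console log level of whatever entry point loads this bootstrap can be raised for a single run from the environment instead of editing the file, for example (the command name is only a placeholder):

    DANCER_DEBUG=1 <command-using-this-bootstrap>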

@@ -93,6 +93,20 @@ is($j8->_last_priority, 100, 'priority is for snmp');
 is($j8->log, 'OK: SNMP driver is successful.',
   'add to an action');

+config->{'device_auth'} = [];
+
+my $j9 = do_job('TestSix');
+is($j9->status, 'done', 'status is done');
+is((scalar @{$j9->_statuslist}), 3, 'three workers ran');
+is($j9->_last_priority, 0, 'priority is for driverless action');
+is($j9->log, 'OK: second driverless action is successful.',
+  'driverless actions always run');
+
+my $j10 = do_job('TestSeven');
+is($j10->best_status, 'error', 'status is error');
+is((scalar @{$j10->_statuslist}), 2, 'two workers ran');
+is($j10->_last_priority, 1000000, 'priority is for direct action');
+
 done_testing;

 # TESTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -6,11 +6,11 @@ use aliased 'App::Netdisco::Worker::Status';

 # info 'test: add to an action';

-register_worker({ phase => 'main', driver => 'snmp' }, sub {
-  return Status->error('NOT OK: additional worker at SNMP level.');
+register_worker({ phase => 'main', driver => 'snmp', title => 'NOT OK' }, sub {
+  return Status->done('NOT OK: additional worker at SNMP level.');
 });

-register_worker({ phase => 'main', driver => 'snmp' }, sub {
+register_worker({ phase => 'main', driver => 'snmp', title => 'OK' }, sub {
   return Status->done('OK: SNMP driver is successful.');
 });

@@ -7,7 +7,7 @@ use aliased 'App::Netdisco::Worker::Status';
 # info 'test: workers are run in decreasing priority until done';

 register_worker({ phase => 'main', driver => 'cli' }, sub {
-  return Status->noop('NOT OK: CLI driver is not the winner here.');
+  return Status->info('NOT OK: CLI driver is not the winner here.');
 });

 register_worker({ phase => 'main', driver => 'snmp' }, sub {

xt/lib/App/NetdiscoX/Worker/Plugin/TestSeven.pm (new file, 17 lines)
@@ -0,0 +1,17 @@
package App::NetdiscoX::Worker::Plugin::TestSeven;

use Dancer ':syntax';
use App::Netdisco::Worker::Plugin;
use aliased 'App::Netdisco::Worker::Status';

# info 'test: add to an action';

register_worker({ phase => 'main', driver => 'direct', title => 'cancelled' }, sub {
  return (shift)->cancel('NOT OK: cancelled job at direct level.');
});

register_worker({ phase => 'main', driver => 'snmp', title => 'OK' }, sub {
  return Status->done('OK: SNMP driver is successful.');
});

true;
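
TestSeven above exercises the cancel path: the direct-level worker cancels the job, so the SNMP worker is never attempted and the job's best status is the error. A hypothetical production-style worker using the same pattern (the file path and condition are invented; package and imports as in TestSeven):

    register_worker({ phase => 'main', driver => 'direct', title => 'import nodes' }, sub {
      my $job  = shift;
      my $file = '/tmp/nodes.json';   # hypothetical input path
      return $job->cancel("cannot read $file - not falling back to SNMP")
        unless -r $file;
      return Status->done("imported nodes from $file");
    });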

xt/lib/App/NetdiscoX/Worker/Plugin/TestSix.pm (new file, 21 lines)
@@ -0,0 +1,21 @@
package App::NetdiscoX::Worker::Plugin::TestSix;

use Dancer ':syntax';
use App::Netdisco::Worker::Plugin;
use aliased 'App::Netdisco::Worker::Status';

# info 'test: driverless actions always run';

register_worker({ phase => 'main', title => 'first driverless action' }, sub {
  return Status->done('OK: first driverless action is successful.');
});

register_worker({ phase => 'main', driver => 'snmp', title => 'worker at SNMP' }, sub {
  return Status->error('NOT OK: additional worker at SNMP level.');
});

register_worker({ phase => 'main', title => 'second driverless action' }, sub {
  return Status->done('OK: second driverless action is successful.');
});

true;