support loading the full CDR graph, featuring all relations:
-cdr_groups
-cdr_tags
-cdr_mos
-cdr_status
-cdr_relations
-cdr_presentity
-cdr_cash_balances
-cdr_time_balances
Aside from various fixes, NGCP::BulkProcessor::Closure is introduced,
which provides uniform code execution:
-perl code (from YAML)
-perl code (from string)
-javascript (from string)
an identical symbol table is exposed to either
language env, including the SQL connector api.
Change-Id: If10422df33d996fb6a6a6b53d0ead28ea1cef755
(cherry picked from commit 2f56063aff
)
mr12.5
parent
fccb7ec8d8
commit
01cbb4a949
@ -0,0 +1,551 @@
|
||||
package NGCP::BulkProcessor::Closure;
|
||||
use strict;

use warnings;

no warnings 'uninitialized'; ## no critic (ProhibitNoWarnings)

use Carp qw(croak);
use Scalar::Util qw(blessed reftype);
use Eval::Closure qw(eval_closure);
|
||||
# Replacement for Eval::Closure::_make_lexical_assignment (installed via
# local *... in new() below). Generates the 'my ...' preamble statement that
# binds one environment entry to a lexical inside the compiled closure.
# NOTE(review): adapted from Eval::Closure internals — keep in sync with the
# installed Eval::Closure version.
my $eval_closure_make_lexical_assignment = sub {
    my ($key, $index, $alias) = @_;
    my $sigil = substr($key, 0, 1);
    my $name = substr($key, 1);
    # '&name' entries become lexical subs (when perl supports them),
    # forwarded via goto so @_ is passed through unchanged.
    if (Eval::Closure::HAS_LEXICAL_SUBS && $sigil eq '&') {
        my $tmpname = '$__' . $name . '__' . $index;
        return 'use feature "lexical_subs"; '
             . 'no warnings "experimental::lexical_subs"; '
             . 'my ' . $tmpname . ' = $_[' . $index . ']; '
             . 'my sub ' . $name . ' { goto ' . $tmpname . ' }';
    }
    if ($alias) {
        # alias mode: declare only; the caller aliases the variable itself
        return 'my ' . $key . ';';
    }
    else {
        # plain copy of the reference passed in @_ (no dereferencing here,
        # unlike upstream — values are passed as-is, see the commented line)
        return 'my ' . $key . ' = ' . '$_[' . $index . '];';
        #return 'my ' . $key . ' = ' . $sigil . '{$_[' . $index . ']};';
    }
};
|
||||
# Replacement for Eval::Closure::_validate_env (installed via local *... in
# new() below). Relaxed upstream validation: keys must still carry a sigil,
# but values are no longer required to be references, so plain scalars can
# be exposed to the compiled perl closure directly.
my $eval_closure_validate_env = sub {
    my ($env) = @_;

    croak("The 'environment' parameter must be a hashref")
        unless reftype($env) eq 'HASH';

    for my $var (keys %$env) {
        if (Eval::Closure::HAS_LEXICAL_SUBS) {
            croak("Environment key '$var' should start with \@, \%, \$, or \&")
                if index('$@%&', substr($var, 0, 1)) < 0;
        }
        else {
            # no lexical subs on this perl: '&' keys are not allowed
            croak("Environment key '$var' should start with \@, \%, or \$")
                if index('$@%', substr($var, 0, 1)) < 0;
        }
        # upstream reference-only restriction intentionally dropped:
        #croak("Environment values must be references, not $env->{$var}")
        #    unless ref($env->{$var});
    }
};
|
||||
|
||||
|
||||
#use JE::Destroyer qw();
use JE qw();

# Monkey-patch JE::Object::evall (used by JE when javascript calls back into
# perl evaluation). The stock implementation dies on non-perl input; this
# variant falls back to treating the value as a plain string literal.
{
    no warnings 'redefine'; ## no critic (ProhibitNoWarnings)
    *JE::Object::evall = sub {
        no warnings; ## no critic (ProhibitNoWarnings)
        my $global = shift;
        my $v = shift;
        # first attempt: evaluate $v as perl code ('local *_' shields $_)
        my $r = eval 'local *_;' . $v; ## no critic (ProhibitStringyEval)
        if ($@) {
            my $e = $@;
            # fallback: re-evaluate $v single-quoted, i.e. as a string value
            $r = eval "local *_;'$v'"; ## no critic (ProhibitStringyEval)
            if ($@) {
                # bare die re-raises the current $@ (the fallback's error)
                die;
            }
        }
        $r;
    };
}
|
||||
|
||||
use JSON qw();

use YAML::Types;

# Monkey-patch YAML's code deserializer so that perl snippets loaded from
# YAML are compiled into the shared 'yamlmain' package (where
# _register_closure_var() later installs the exposed symbols), and so that
# compile errors die instead of being downgraded to warnings.
{
    no warnings 'redefine'; ## no critic (ProhibitNoWarnings)
    *YAML::Type::code::yaml_load = sub {
        my $self = shift;
        my ($node, $class, $loader) = @_;
        if ($loader->load_code) {
            $node = "sub $node" unless $node =~ /^\s*sub/; #upstream backward compat
            # compile into yamlmain:: with relaxed vars so exposed globals resolve
            my $code = eval "package yamlmain; no strict 'vars'; $node"; ## no critic (ProhibitStringyEval)
            if ($@) {
                # hard failure (upstream merely warned and returned sub {}):
                die ($@);
                #$loader->warn('YAML_LOAD_WARN_PARSE_CODE', $@);
                #return sub {};
            }
            else {
                CORE::bless $code, $class if ($class and $YAML::LoadBlessed);
                return $code;
            }
        }
        else {
            # code loading disabled: return an inert stub instead
            return CORE::bless sub {}, $class if ($class and $YAML::LoadBlessed);
            return sub {};
        }
    };
}
|
||||
|
||||
use NGCP::BulkProcessor::SqlConnector qw();
|
||||
|
||||
use NGCP::BulkProcessor::Array qw(array_to_map);
|
||||
|
||||
require Exporter;
|
||||
our @ISA = qw(Exporter);
|
||||
our @EXPORT_OK = qw(
|
||||
closure
|
||||
cleanup
|
||||
is_code
|
||||
clear_stash
|
||||
);
|
||||
|
||||
# Sandbox denylist: perl built-ins that user-supplied code must not call
# (I/O, filesystem, process control, sockets, SysV IPC, user/group and
# network database lookups, exit/goto). array_to_map builds a name=>1 lookup.
(my $DISABLED_CORE_FUNCTION_MAP, undef, undef) = array_to_map([ qw(
    binmode close closedir dbmclose dbmopen eof fileno flock format getc read
    readdir rewinddir say seek seekdir select syscall sysread sysseek
    syswrite tell telldir truncate write print printf

    chdir chmod chown chroot fcntl glob ioctl link lstat mkdir open opendir readlink
    rename rmdir stat symlink sysopen umask unlink utime

    alarm exec fork getpgrp getppid getpriority kill pipe setpgrp setpriority sleep
    system times wait waitpid

    accept bind connect getpeername getsockname getsockopt listen recv send setsockopt
    shutdown socket socketpair

    msgctl msgget msgrcv msgsnd semctl semget semop shmctl shmget shmread shmwrite

    endgrent endhostent endnetent endpwent getgrent getgrgid getgrnam getlogin getpwent
    getpwnam getpwuid setgrent setpwent

    endprotoent endservent gethostbyaddr gethostbyname gethostent getnetbyaddr
    getnetbyname getnetent getprotobyname getprotobynumber getprotoent getservbyname
    getservbyport getservent sethostent setnetent setprotoent setservent

    exit goto
)], sub { return shift; }, sub { return 1; }, 'last');

my @DISABLED_CORE_FUNCTIONS = grep { $DISABLED_CORE_FUNCTION_MAP->{$_}; } keys %$DISABLED_CORE_FUNCTION_MAP;

# $PERL_ENV is prepended to every compiled perl snippet: 'use subs' overrides
# the built-ins with same-named subs that die when invoked.
my $PERL_ENV = 'use subs qw(' . join(' ', @DISABLED_CORE_FUNCTIONS) . ");\n";
foreach my $f (@DISABLED_CORE_FUNCTIONS) {
    $PERL_ENV .= 'sub ' . $f . " { die('$f called'); }\n";
}
|
||||
|
||||
# preamble prepended to every compiled javascript snippet (none needed yet)
my $JS_ENV = '';

# class name JE uses when javascript objects cross back into perl
my $JE_ANON_CLASS = 'je_anon';
# let JSON serialize such objects by unblessing them first
sub je_anon::TO_JSON {
    return _unbless(@_);
};

# per-source-string compiled closure / JE interpreter cache
my %interpreter_cache = ();
# cross-invocation key/value store shared by all languages (see stash_get/set)
my %stash = ();
# per-JS-source map of symbols already exported into its interpreter
my %je_exported_map = ();
|
||||
|
||||
# Fetch a value from the shared stash; no-op (empty return) for a false key.
sub _stash_get {

    my $key = shift;
    return unless $key;
    return $stash{$key};

}
|
||||
# Store a value in the shared stash; silently ignored for a false key.
sub _stash_set {

    my ($key, $value) = @_;
    return unless $key;
    $stash{$key} = $value;

}
|
||||
|
||||
# Tear down all cached interpreters and the stash. JE objects hold circular
# references, so JE::Destroyer is loaded on demand to break them; when it is
# not installed, cleanup silently does nothing.
sub cleanup {

    return unless eval {
        #no warnings 'deprecated';
        require JE::Destroyer;
        JE::Destroyer->import();
        1;
    };
    clear_stash();
    for my $source (keys %interpreter_cache) {
        my $interpreter = $interpreter_cache{$source};
        JE::Destroyer::destroy($interpreter) if 'JE' eq ref $interpreter; # break circular refs
        delete $interpreter_cache{$source};
        delete $je_exported_map{$source};
    }

}
|
||||
|
||||
# Drop all entries from the cross-invocation key/value stash.
sub clear_stash {

    %stash = ();

}
|
||||
|
||||
# Construct a Closure from one of three code forms:
#   - a perl coderef (e.g. loaded from YAML via the patched yaml_load above),
#   - a perl source string starting with 'sub',
#   - a javascript source string starting with 'function'.
# $context is a hashref of symbols exposed to the code (keys starting with
# '_' are skipped); built-ins get_env/to_json/stash_get/stash_set are always
# provided and can be overridden by the caller. $description is used to
# prefix compile/runtime error messages.
# Compiled artifacts are cached per source string in %interpreter_cache.
sub new {

    my $class = shift;
    my $self = bless {}, $class;

    my ($code,$context,$description) = @_;

    $self->{description} = $description;
    if ('CODE' eq ref $code) {
        $self->{description} //= 'coderef';
        $self->{type} = "coderef";
        # fixed: was '= ()' (undef in scalar context); autovivification
        # masked it, but an explicit empty hashref is what is meant here
        $self->{exported_map} = {};
        # expose context symbols as yamlmain:: package globals/subs
        foreach my $key (_get_public_vars($context = {
            get_env => sub {
                return _filter_perl_env_symbols(keys %yamlmain::);
            },
            to_json => \&_unbless_to_json,
            stash_get => \&_stash_get,
            stash_set => \&_stash_set,
            %{$context // {}},
        })) {
            _register_closure_var($key,$context->{$key});
            $self->{exported_map}->{$key} = 1;
        }
        $self->{code} = $code;
    } elsif ($code =~ /^\s*sub/) { #perl
        $self->{source} = $code;
        $self->{description} //= 'perl function';
        $self->{type} = "perl";
        unless (exists $interpreter_cache{$code}) {
            # swap in the relaxed Eval::Closure helpers defined above
            local *Eval::Closure::_make_lexical_assignment = $eval_closure_make_lexical_assignment;
            local *Eval::Closure::_validate_env = $eval_closure_validate_env;
            my @exported = ();
            eval {
                $interpreter_cache{$code} = eval_closure(
                    source => ($PERL_ENV . $code),
                    # context entries become lexicals: coderefs as '&name',
                    # everything else as '$name' (je_anon objects unblessed)
                    environment => {
                        map { if ('ARRAY' eq ref $context->{$_}) {
                            push(@exported,$_);
                            ('$' . $_) => $context->{$_};
                        } elsif ('HASH' eq ref $context->{$_}) {
                            push(@exported,$_);
                            ('$' . $_) => $context->{$_};
                        } elsif ($JE_ANON_CLASS eq ref $context->{$_}) {
                            push(@exported,$_);
                            ('$' . $_) => _unbless($context->{$_});
                        } elsif ('CODE' eq ref $context->{$_}) {
                            push(@exported,$_);
                            ('&' . $_) => $context->{$_};
                        } elsif (ref $context->{$_}) {
                            push(@exported,$_);
                            ('$' . $_) => $context->{$_};
                        } else {
                            push(@exported,$_);
                            ('$' . $_) => $context->{$_};
                        } } _get_public_vars($context = {
                            get_env => sub {
                                no strict "refs"; ## no critic (ProhibitNoStrict)
                                return (@exported,_filter_perl_env_symbols(keys %{caller() .'::'}));
                            },
                            to_json => \&_unbless_to_json,
                            stash_get => \&_stash_get,
                            stash_set => \&_stash_set,
                            %{$context // {}},
                        })
                    },
                    terse_error => 1,
                    description => $self->{description},
                    alias => 0,
                );
            };
            if ($@) {
                die("$self->{description}: " . $@);
            }
        }
    } elsif ($code =~ /^\s*function/) { #javascript
        $self->{source} = $code;
        $self->{description} //= 'javascript function';
        $self->{type} = "js";
        my $je;
        if (exists $interpreter_cache{$code}) {
            $je = $interpreter_cache{$code};
        } else {
            $je_exported_map{$code} = {};
            $je = JE->new();
            # compile once; calc() invokes the stored _func
            $je->eval($JS_ENV . "\nvar _func = " . $code . ';');
            $interpreter_cache{$code} = $je;
        }
        # inject context symbols (perl callbacks wrapped to unbox JE values)
        $je->eval(_serialize_je_args($je,{
            get_env => sub {
                return [ _filter_js_env_symbols(keys %$je) ];
            },
            to_json => sub {
                my ($obj,$pretty, $canonical) = @_;
                return _to_json(_unbox_je_value($obj), _unbox_je_value($pretty), _unbox_je_value($canonical));
            },
            quotemeta => sub {
                my $s = shift;
                return quotemeta(_unbox_je_value($s));
            },
            sprintf => sub {
                my ($f,@p) = @_;
                return sprintf(_unbox_je_value($f), map {
                    _unbox_je_value($_);
                } @p);
            },
            stash_get => sub {
                my $k = shift;
                return _stash_get(_unbox_je_value($k));
            },
            stash_set => sub {
                my ($k,$v) = @_;
                _stash_set(_unbox_je_value($k),_unbox_je_value($v));
            },
            %{$context // {}},
        },$je_exported_map{$code}));
        die("$self->{description}: " . $@) if $@;
    } else {
        # fixed typo in error message: 'langage' -> 'language'
        die("unsupported expression language");
    }

    return $self;

}
|
||||
|
||||
# Install one context symbol into the shared 'yamlmain' package, where
# coderef-type closures (compiled there by the patched yaml_load) resolve
# their globals: coderefs become subs, everything else a scalar.
sub _register_closure_var {

    my ($name,$value) = @_;
    # modified globally?
    no strict "refs"; ## no critic (ProhibitNoStrict)
    if ('CODE' ne ref $value) {
        ${"yamlmain::$name"} = $value;
    } else {
        no warnings 'redefine'; ## no critic (ProhibitNoWarnings)
        *{"yamlmain::$name"} = $value;
    }

}
|
||||
|
||||
# Return the keys of a context hashref that are exposed to user code,
# i.e. all keys not starting with an underscore.
sub _get_public_vars {

    my $context = shift;
    return grep { '_' ne substr($_, 0, 1) } keys %{$context};

}
|
||||
|
||||
# Convert perl values into javascript for a JE interpreter. Two modes:
#   - hashref $args with $je_env: environment injection — emits ';'-joined
#     'var k = ...' / 'k = ...' statements, tracking in $je_env which names
#     were already declared; coderefs and SqlConnector objects are not
#     serialized but registered as native JE functions (side effect).
#   - arrayref $args: call-argument serialization — emits a ','-joined list.
# Returns the generated javascript source fragment.
sub _serialize_je_args {

    my ($je,$args,$je_env) = @_;
    my $sep;
    my @args;
    if ('HASH' eq ref $args and $je_env) {
        $sep = ";\n";
        @args = map { { k => $_, v => $args->{$_}, }; } _get_public_vars($args);
    } else {
        $sep = ",";
        @args = map { { k => undef, v => $_, }; } @$args;
    }
    return join ($sep,map {
        if ('CODE' eq ref $_->{v}) {
            # perl callback: register once as a JE native function; JE
            # arguments are unboxed to plain perl values before the call
            if ($_->{k} and not $je_env->{$_->{k}}) {
                $je_env->{$_->{k}} = 1;
                my $sub = $_->{v};
                $je->new_function($_->{k} => sub {
                    return $sub->(map { _unbox_je_value($_); } @_);
                });
            }
            # contributes no source text
            ();
        } elsif (blessed $_->{v} and $_->{v}->isa('NGCP::BulkProcessor::SqlConnector')) {
            # SQL connector: expose each of its db_* methods as a global
            # JE function bound to this connector instance
            if ($_->{k} and not $je_env->{$_->{k}}) {
                $je_env->{$_->{k}} = 1;
                my $db = $_->{v};
                no strict 'refs'; ## no critic (ProhibitNoStrict)
                foreach my $k (keys %NGCP::BulkProcessor::SqlConnector::) {
                    next unless substr($k,0,3) eq "db_";
                    if (exists &{"NGCP::BulkProcessor::SqlConnector::$k"}) { # check if symbol is method
                        $je->new_function($k => sub {
                            return $db->$k(map { _unbox_je_value($_); } @_);
                        });
                    }
                }
            }
            ();
        } elsif (('ARRAY' eq ref $_->{v})
            or ('HASH' eq ref $_->{v})
            or ($JE_ANON_CLASS eq ref $_->{v})) {
            # plain containers (and je_anon round-trips): pass as JSON
            if (not $_->{k}) {
                _to_json($_->{v});
            } elsif ($je_env->{$_->{k}}) {
                $_->{k} . ' = ' . _to_json($_->{v});
            } else {
                $je_env->{$_->{k}} = 1;
                'var ' . $_->{k} . ' = ' . _to_json($_->{v});
            }
        } elsif (('ARRAY' eq reftype($_->{v}))
            or ('HASH' eq reftype($_->{v}))) {
            # blessed containers: unbless recursively, then pass as JSON
            if (not $_->{k}) {
                _unbless_to_json($_->{v});
            } elsif ($je_env->{$_->{k}}) {
                $_->{k} . ' = ' . _unbless_to_json($_->{v});
            } else {
                $je_env->{$_->{k}} = 1;
                'var ' . $_->{k} . ' = ' . _unbless_to_json($_->{v});
            }
        } elsif (ref $_->{v}) {
            # any other reference type cannot be represented in JE
            warn((ref $_->{v}) . ' objects not available in javascript');
        } else {
            # plain scalar: single-quoted, backslash/quote-escaped string
            if (not $_->{k}) {
                "'" . _escape_js($_->{v}) . "'";
            } elsif ($je_env->{$_->{k}}) {
                $_->{k} . " = '" . _escape_js($_->{v}) . "'";
            } else {
                $je_env->{$_->{k}} = 1;
                'var ' . $_->{k} . " = '" . _escape_js($_->{v}) . "'";
            }
        }
    } @args);

}
|
||||
|
||||
# Invoke the closure. $context supplies additional/updated symbols for this
# call; remaining arguments are passed to the underlying code. Returns the
# full result list in list context, the first result otherwise. Errors die
# prefixed with the closure's description.
sub calc {

    my $self = shift;
    my $context = shift;
    my @v;
    if ("coderef" eq $self->{type}) {
        # export any context symbols not yet registered in yamlmain::
        foreach my $key (_get_public_vars($context)) {
            unless ($self->{exported_map}->{$key}) {
                _register_closure_var($key,$context->{$key});
                $self->{exported_map}->{$key} = 1;
            }
        }
        eval {
            @v = $self->{code}->(@_);
            $v[0] = _unbless($v[0]) if ($JE_ANON_CLASS eq ref $v[0]);
        };
        if ($@) {
            die("$self->{description}: " . $@);
        }
    } elsif ("perl" eq $self->{type}) {
        # fixed: the call is now wrapped in eval like the coderef branch.
        # Previously $@ was tested after an unguarded call, so a stale $@
        # from any earlier handled eval triggered a spurious die, while a
        # genuine die escaped without the description prefix.
        eval {
            @v = $interpreter_cache{$self->{source}}->(@_);
            $v[0] = _unbless($v[0]) if ($JE_ANON_CLASS eq ref $v[0]);
        };
        if ($@) {
            die("$self->{description}: " . $@);
        }
    } elsif ("js" eq $self->{type}) {
        my $je = $interpreter_cache{$self->{source}};
        # refresh the interpreter's environment from $context, then call _func
        my $updated_je_env = '';
        $updated_je_env = _serialize_je_args($je,$context,$je_exported_map{$self->{source}}) if $context;
        $updated_je_env .= ";\n" if length($updated_je_env);
        my $call;
        if (scalar @_) {
            $call = "_func(" . _serialize_je_args($je,[ @_ ],$je_exported_map{$self->{source}}) . ");";
        } else {
            $call = "_func();"
        }
        # JE->eval sets $@ on javascript errors and returns undef
        $v[0] = _unbox_je_value($interpreter_cache{$self->{source}}->eval($updated_je_env . $call));
        if ($@) {
            die("$self->{description}: " . $@);
        }
    }

    return @v if wantarray;
    return $v[0];

}
|
||||
|
||||
# Report whether $code is something this module can execute: a coderef, or
# a source string beginning with 'sub' (perl) or 'function' (javascript).
# Returns 1/0; empty for undef input.
sub is_code {

    my $code = shift;
    return unless defined $code;
    return 1 if 'CODE' eq ref $code;
    if (not ref $code) {
        return 1 if $code =~ /^\s*(?:function|sub)/;
    }
    return 0;

}
|
||||
|
||||
# Recursively convert a value returned by JE into plain perl: JE::* wrappers
# are unwrapped via ->value, je_anon objects are unblessed, and containers
# are rebuilt with unboxed elements. undef passes through.
sub _unbox_je_value {

    my $value = shift;
    return undef unless defined $value; ## no critic (ProhibitExplicitReturnUndef)
    my $class = ref $value;
    if ($class =~ /^JE::/) {
        $value = $value->value;
    } elsif ($class eq $JE_ANON_CLASS) {
        $value = _unbless($value);
    }
    my $container = ref $value;
    return [ map { _unbox_je_value($_); } @$value ] if 'ARRAY' eq $container;
    return { map { $_ => _unbox_je_value($value->{$_}); } keys %$value } if 'HASH' eq $container;
    return $value;

}
|
||||
|
||||
# Recursively strip blessings: any hash- or array-based object (blessed or
# not) is rebuilt as a plain hashref/arrayref; other values pass through.
sub _unbless {

    my $data = shift;
    my $base = reftype($data);
    if (defined $base and 'HASH' eq $base) {
        return { map { $_ => _unbless($data->{$_}); } keys %$data };
    }
    if (defined $base and 'ARRAY' eq $base) {
        return [ map { _unbless($_); } @$data ];
    }
    return $data;

};
|
||||
|
||||
# Escape a scalar for embedding in a javascript string literal delimited by
# $quote_char (default single quote): backslashes are doubled first, then
# the quote character is backslash-escaped.
sub _escape_js {

    my $str = shift // '';
    my $quote_char = shift;
    $quote_char //= "'";
    $str =~ s/\\/\\\\/g;
    # fixed: \Q...\E quotes the delimiter, so regex-special quote characters
    # (previously interpolated raw into the pattern) no longer break it
    $str =~ s/\Q$quote_char\E/\\$quote_char/g;
    return $str;

}
|
||||
|
||||
# Serialize a value to JSON, tolerant of non-references, blessed objects
# (via their TO_JSON) and unknowns; $pretty/$canonical are passed through.
sub _to_json {

    my ($data,$pretty,$canonical) = @_;
    my $options = {
        allow_nonref => 1, allow_blessed => 1, allow_unknown => 1,
        convert_blessed => 1, pretty => $pretty, canonical => $canonical,
    };
    return JSON::to_json($data, $options);

}
|
||||
|
||||
# Drop symbol-table names not meaningful to user code: anonymous subs,
# BEGIN blocks, and the sandboxed (disabled) core functions.
sub _filter_perl_env_symbols {

    return grep {
        my $sym = $_;
        $sym !~ /^__ANON__/
            and $sym !~ /^BEGIN/
            and not $DISABLED_CORE_FUNCTION_MAP->{$sym};
    } @_;

}
|
||||
|
||||
# Hide the internal _func slot (the compiled entry point) from get_env().
sub _filter_js_env_symbols {

    return grep { !/^_func/ } @_;

}
|
||||
|
||||
# JSON-serialize after recursively stripping blessings; extra arguments
# ($pretty, $canonical) are forwarded to _to_json.
sub _unbless_to_json {

    my $data = shift;
    my $plain = _unbless($data);
    return _to_json($plain, @_);

}
|
||||
|
||||
1;
|
@ -0,0 +1,145 @@
|
||||
# DAO for the accounting.cdr_cash_balance_data table: per-CDR cash balance
# snapshots (value before/after), linked to contract_balances, cdr_provider
# and cdr_direction.
package NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_cash_balance_data;
use strict;

## no critic

use NGCP::BulkProcessor::Logging qw(
    getlogger
);

use NGCP::BulkProcessor::ConnectorPool qw(
    get_accounting_db
    destroy_dbs
);

use NGCP::BulkProcessor::SqlProcessor qw(
    checktableinfo
    copy_row

    insert_record
);
use NGCP::BulkProcessor::SqlRecord qw();

use NGCP::BulkProcessor::Dao::Trunk::billing::contract_balances qw();
use NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_provider qw();
use NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_direction qw();

require Exporter;
our @ISA = qw(Exporter NGCP::BulkProcessor::SqlRecord);
our @EXPORT_OK = qw(
    gettablename
    settablename
    check_table

    findby_cdrid

    insert_row
);

my $tablename = 'cdr_cash_balance_data';
my $get_db = \&get_accounting_db;

# expected schema; verified against the live table by check_table()
my $expected_fieldnames = [
    "cdr_id",
    "provider_id",
    "direction_id",
    "cash_balance_id",
    "val_before",
    "val_after",
    "cdr_start_time",
];

my $indexes = {};

my $insert_unique_fields = [];

# Construct a record object from a row (passed to copy_row).
sub new {

    my $class = shift;
    my $self = NGCP::BulkProcessor::SqlRecord->new($class,$get_db,
        $tablename,$expected_fieldnames,$indexes);

    copy_row($self,shift,$expected_fieldnames);

    return $self;

}

# Fetch all balance-data rows of one CDR, optionally through an open
# transaction handle ($xa_db; falls back to the pool connection).
sub findby_cdrid {

    my ($xa_db,$cdrid,$load_recursive) = @_;

    check_table();
    my $db = &$get_db();
    $xa_db //= $db;
    my $table = $db->tableidentifier($tablename);

    my $stmt = 'SELECT * FROM ' . $table . ' WHERE ' .
            $db->columnidentifier('cdr_id') . ' = ?';
    my @params = ($cdrid);
    my $rows = $xa_db->db_get_all_arrayref($stmt,@params);

    return buildrecords_fromrows($rows,$load_recursive);

}

# Insert one row; returns the new id (or 1 when the driver reports none),
# undef when the insert was skipped/failed.
sub insert_row {

    my $db = &$get_db();
    my $xa_db = shift // $db;

    my ($data,$insert_ignore) = @_;
    check_table();
    if (insert_record($db,$xa_db,__PACKAGE__,$data,$insert_ignore,$insert_unique_fields)) {
        return $xa_db->db_last_insert_id() || 1;
    }
    return undef;

}

# Wrap raw rows into record objects and attach related records when
# requested via $load_recursive.
sub buildrecords_fromrows {

    my ($rows,$load_recursive) = @_;

    my @records = ();
    my $record;

    if (defined $rows and ref $rows eq 'ARRAY') {
        foreach my $row (@$rows) {
            $record = __PACKAGE__->new($row);

            # transformations go here ...
            # NOTE(review): the contract_balance relation passes an extra
            # leading undef (presumably the $xa_db slot of findby_id) that
            # the two cached lookups below do not — confirm against the
            # respective findby_* signatures.
            $record->load_relation($load_recursive,'contract_balance','NGCP::BulkProcessor::Dao::Trunk::billing::contract_balances::findby_id',undef,$record->{cash_balance_id},$load_recursive);
            $record->load_relation($load_recursive,'direction','NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_direction::findby_id_cached',$record->{direction_id},$load_recursive);
            $record->load_relation($load_recursive,'provider','NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_provider::findby_id_cached',$record->{provider_id},$load_recursive);

            push @records,$record;
        }
    }

    return \@records;

}

sub gettablename {

    return $tablename;

}

# Allow redirecting this DAO to a differently-named (e.g. temporary) table.
sub settablename {

    $tablename = shift;

}

# Validate the live table structure against $expected_fieldnames.
sub check_table {

    return checktableinfo($get_db,
        __PACKAGE__,$tablename,
        $expected_fieldnames,
        $indexes);

}

1;
|
@ -0,0 +1,137 @@
|
||||
# DAO for the accounting.cdr_mos_data table: per-CDR voice quality (MOS)
# averages.
package NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_mos_data;
use strict;

## no critic

use NGCP::BulkProcessor::Logging qw(
    getlogger
);

use NGCP::BulkProcessor::ConnectorPool qw(
    get_accounting_db
    destroy_dbs
);

use NGCP::BulkProcessor::SqlProcessor qw(
    checktableinfo
    copy_row

    insert_record
);
use NGCP::BulkProcessor::SqlRecord qw();

require Exporter;
our @ISA = qw(Exporter NGCP::BulkProcessor::SqlRecord);
our @EXPORT_OK = qw(
    gettablename
    settablename
    check_table

    findby_cdrid

    insert_row
);

my $tablename = 'cdr_mos_data';
my $get_db = \&get_accounting_db;

# expected schema; verified against the live table by check_table()
my $expected_fieldnames = [
    "cdr_id",
    "mos_average",
    "mos_average_packetloss",
    "mos_average_jitter",
    "mos_average_roundtrip",
    "cdr_start_time",
];

my $indexes = {};

my $insert_unique_fields = [];

# Construct a record object from a row (passed to copy_row).
sub new {

    my $class = shift;
    my $row = shift;
    my $self = NGCP::BulkProcessor::SqlRecord->new($class,$get_db,
        $tablename,$expected_fieldnames,$indexes);

    copy_row($self,$row,$expected_fieldnames);

    return $self;

}

# Fetch the MOS rows of one CDR, optionally through an open transaction
# handle ($xa_db; falls back to the pool connection).
sub findby_cdrid {

    my ($xa_db,$cdrid,$load_recursive) = @_;

    check_table();
    my $db = &$get_db();
    $xa_db //= $db;

    my $stmt = 'SELECT * FROM ' . $db->tableidentifier($tablename) .
        ' WHERE ' . $db->columnidentifier('cdr_id') . ' = ?';
    my $rows = $xa_db->db_get_all_arrayref($stmt,$cdrid);

    return buildrecords_fromrows($rows,$load_recursive);

}

# Insert one row; returns the new id (or 1 when the driver reports none),
# undef when the insert was skipped/failed.
sub insert_row {

    my $db = &$get_db();
    my $xa_db = shift // $db;

    my ($data,$insert_ignore) = @_;
    check_table();
    return ($xa_db->db_last_insert_id() || 1)
        if insert_record($db,$xa_db,__PACKAGE__,$data,$insert_ignore,$insert_unique_fields);
    return undef;

}

# Wrap raw rows into record objects.
sub buildrecords_fromrows {

    my ($rows,$load_recursive) = @_;

    return [] unless (defined $rows and ref $rows eq 'ARRAY');

    # transformations go here ...

    return [ map { __PACKAGE__->new($_); } @$rows ];

}

sub gettablename {

    return $tablename;

}

# Allow redirecting this DAO to a differently-named (e.g. temporary) table.
sub settablename {

    $tablename = shift;

}

# Validate the live table structure against $expected_fieldnames.
sub check_table {

    return checktableinfo($get_db,
        __PACKAGE__,$tablename,
        $expected_fieldnames,
        $indexes);

}

1;
|
@ -0,0 +1,136 @@
|
||||
# DAO for the accounting.cdr_presentity table: presence events recorded per
# call-id.
package NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_presentity;
use strict;

## no critic

use NGCP::BulkProcessor::Logging qw(
    getlogger
);

use NGCP::BulkProcessor::ConnectorPool qw(
    get_accounting_db
    destroy_dbs
);

use NGCP::BulkProcessor::SqlProcessor qw(
    checktableinfo
    copy_row

    insert_record
);
use NGCP::BulkProcessor::SqlRecord qw();

require Exporter;
our @ISA = qw(Exporter NGCP::BulkProcessor::SqlRecord);
our @EXPORT_OK = qw(
    gettablename
    settablename
    check_table

    findby_callid

    insert_row
);

my $tablename = 'cdr_presentity';
my $get_db = \&get_accounting_db;

# expected schema; verified against the live table by check_table()
my $expected_fieldnames = [
    "call_id",
    "event",
    "received_time",
    "body",
];

my $indexes = {};

my $insert_unique_fields = [];

# Construct a record object from a row (passed to copy_row).
sub new {

    my $class = shift;
    my $row = shift;
    my $self = NGCP::BulkProcessor::SqlRecord->new($class,$get_db,
        $tablename,$expected_fieldnames,$indexes);

    copy_row($self,$row,$expected_fieldnames);

    return $self;

}

# Fetch all presence rows of one call-id, optionally through an open
# transaction handle ($xa_db; falls back to the pool connection).
sub findby_callid {

    my ($xa_db,$callid,$load_recursive) = @_;

    check_table();
    my $db = &$get_db();
    $xa_db //= $db;

    my $stmt = 'SELECT * FROM ' . $db->tableidentifier($tablename) .
        ' WHERE ' . $db->columnidentifier('call_id') . ' = ?';
    my $rows = $xa_db->db_get_all_arrayref($stmt,$callid);

    return buildrecords_fromrows($rows,$load_recursive);

}

# Insert one row; returns the new id (or 1 when the driver reports none),
# undef when the insert was skipped/failed.
sub insert_row {

    my $db = &$get_db();
    my $xa_db = shift // $db;

    my ($data,$insert_ignore) = @_;
    check_table();
    return ($xa_db->db_last_insert_id() || 1)
        if insert_record($db,$xa_db,__PACKAGE__,$data,$insert_ignore,$insert_unique_fields);
    return undef;

}

# Wrap raw rows into record objects.
sub buildrecords_fromrows {

    my ($rows,$load_recursive) = @_;

    return [] unless (defined $rows and ref $rows eq 'ARRAY');

    # transformations go here ...

    return [ map { __PACKAGE__->new($_); } @$rows ];

}

sub gettablename {

    return $tablename;

}

# Allow redirecting this DAO to a differently-named (e.g. temporary) table.
sub settablename {

    $tablename = shift;

}

# Validate the live table structure against $expected_fieldnames.
sub check_table {

    return checktableinfo($get_db,
        __PACKAGE__,$tablename,
        $expected_fieldnames,
        $indexes);

}

1;
|
@ -0,0 +1,143 @@
|
||||
# DAO for the accounting.cdr_relation lookup table (relation type catalog,
# id + type). Read-only; provides a memoized by-id lookup.
package NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_relation;
use strict;

## no critic

use NGCP::BulkProcessor::Logging qw(
    getlogger
);

use NGCP::BulkProcessor::ConnectorPool qw(
    get_accounting_db
    destroy_dbs
);

use NGCP::BulkProcessor::SqlProcessor qw(
    checktableinfo
    copy_row

);
use NGCP::BulkProcessor::SqlRecord qw();

use NGCP::BulkProcessor::Array qw(array_to_map);

require Exporter;
our @ISA = qw(Exporter NGCP::BulkProcessor::SqlRecord);
our @EXPORT_OK = qw(
    gettablename
    check_table

    findall
    findby_id
    findby_id_cached

);


my $tablename = 'cdr_relation';
my $get_db = \&get_accounting_db;

# expected schema; verified against the live table by check_table()
my $expected_fieldnames = [
    "id",
    "type",
];


my $indexes = {};

my $insert_unique_fields = [];

# Construct a record object from a row (passed to copy_row).
sub new {

    my $class = shift;
    my $self = NGCP::BulkProcessor::SqlRecord->new($class,$get_db,
        $tablename,$expected_fieldnames,$indexes);

    copy_row($self,shift,$expected_fieldnames);

    return $self;

}

# Load the entire (small) catalog table.
sub findall {

    my ($load_recursive) = @_;

    check_table();
    my $db = &$get_db();
    my $table = $db->tableidentifier($tablename);

    my $stmt = 'SELECT * FROM ' . $table;
    my @params = ();
    my $rows = $db->db_get_all_arrayref($stmt,@params);

    return buildrecords_fromrows($rows,$load_recursive);

}

# id => record map, populated once per process on first cached lookup
my $cdr_relation_map;

# Memoized by-id lookup: loads the whole table on first use, then serves
# fresh record copies from the map. Returns empty for an undefined id.
sub findby_id_cached {
    my ($id,$load_recursive) = @_;
    unless ($cdr_relation_map) {
        ($cdr_relation_map, my $relations, my $ids) = array_to_map(findall($load_recursive),
            sub { return shift->{id}; }, sub { return shift; }, 'last');
    }
    return __PACKAGE__->new($cdr_relation_map->{$id}) if defined $id;
    return;
}

# Uncached by-id lookup (hits the database every time).
sub findby_id {

    my ($id,$load_recursive) = @_;

    check_table();
    my $db = &$get_db();
    my $table = $db->tableidentifier($tablename);

    my $stmt = 'SELECT * FROM ' . $table . ' WHERE ' .
            $db->columnidentifier('id') . ' = ?';
    my @params = ($id);
    my $rows = $db->db_get_all_arrayref($stmt,@params);

    return buildrecords_fromrows($rows,$load_recursive);

}

# Wrap raw rows into record objects.
sub buildrecords_fromrows {

    my ($rows,$load_recursive) = @_;

    my @records = ();
    my $record;

    if (defined $rows and ref $rows eq 'ARRAY') {
        foreach my $row (@$rows) {
            $record = __PACKAGE__->new($row);

            # transformations go here ...

            push @records,$record;
        }
    }

    return \@records;

}

sub gettablename {

    return $tablename;

}

# Validate the live table structure against $expected_fieldnames.
sub check_table {

    return checktableinfo($get_db,
        __PACKAGE__,$tablename,
        $expected_fieldnames,
        $indexes);

}

1;
|
@ -0,0 +1,184 @@
|
||||
package NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_relation_data;
use strict;

## no critic

# DAO for the accounting.cdr_relation_data table: per-CDR relation values,
# keyed by (cdr_id, provider_id, direction_id, relation_id).

use NGCP::BulkProcessor::Logging qw(
    getlogger
);

use NGCP::BulkProcessor::ConnectorPool qw(
    get_accounting_db
    destroy_dbs
);

use NGCP::BulkProcessor::SqlProcessor qw(
    checktableinfo
    copy_row

    insert_record
);
use NGCP::BulkProcessor::SqlRecord qw();

use NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_relation qw();
use NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_provider qw();
use NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_direction qw();

require Exporter;
our @ISA = qw(Exporter NGCP::BulkProcessor::SqlRecord);
our @EXPORT_OK = qw(
    gettablename
    settablename
    check_table

    findby_cdrproviderdirectiontag
    findby_cdrid

    insert_row
);

my $tablename = 'cdr_relation_data';
my $get_db = \&get_accounting_db;

my $expected_fieldnames = [
    "cdr_id",
    "provider_id",
    "direction_id",
    "relation_id",
    "val",
    "cdr_start_time",
];

my $indexes = {};

my $insert_unique_fields = [];

sub new {

    # Construct a record object, copying the known columns from the given row.
    my $class = shift;
    my $self = NGCP::BulkProcessor::SqlRecord->new($class,$get_db,
        $tablename,$expected_fieldnames,$indexes);

    copy_row($self,shift,$expected_fieldnames);

    return $self;

}

sub findby_cdrproviderdirectiontag {

    # Load the relation value rows of one CDR for a given provider,
    # direction and relation. $xa_db optionally supplies a transaction
    # connection; falls back to the plain accounting connection.
    my ($xa_db,$cdrid,$providerid,$directionid,$relationid,$load_recursive) = @_;

    check_table();
    my $db = &$get_db();
    $xa_db //= $db;
    my $table = $db->tableidentifier($tablename);

    # BUGFIX: this table has no 'tag_id' column (see $expected_fieldnames);
    # the previous filter on 'tag_id' was a copy/paste leftover from the
    # cdr_tag_data DAO and could never match - filter on 'relation_id'.
    my $stmt = 'SELECT * FROM ' . $table . ' WHERE ' .
            $db->columnidentifier('cdr_id') . ' = ?' .
            ' AND ' . $db->columnidentifier('provider_id') . ' = ?' .
            ' AND ' . $db->columnidentifier('direction_id') . ' = ?' .
            ' AND ' . $db->columnidentifier('relation_id') . ' = ?';
    my @params = ($cdrid,$providerid,$directionid,$relationid);
    my $rows = $xa_db->db_get_all_arrayref($stmt,@params);

    return buildrecords_fromrows($rows,$load_recursive);

}

sub findby_cdrid {

    # Load all relation value rows belonging to one CDR.
    my ($xa_db,$cdrid,$load_recursive) = @_;

    check_table();
    my $db = &$get_db();
    $xa_db //= $db;
    my $table = $db->tableidentifier($tablename);

    my $stmt = 'SELECT * FROM ' . $table . ' WHERE ' .
            $db->columnidentifier('cdr_id') . ' = ?';
    my @params = ($cdrid);
    my $rows = $xa_db->db_get_all_arrayref($stmt,@params);

    return buildrecords_fromrows($rows,$load_recursive);

}

sub findby_callid {

    # NOTE(review): 'call_id' is not among $expected_fieldnames of this
    # table, and this sub is not exported - it looks like a copy/paste
    # remnant from the cdr DAO. Confirm whether it can be removed; kept
    # unchanged here to avoid breaking any fully-qualified caller.
    my ($xa_db,$callid,$load_recursive) = @_;

    check_table();
    my $db = &$get_db();
    $xa_db //= $db;
    my $table = $db->tableidentifier($tablename);

    my $stmt = 'SELECT * FROM ' . $table . ' WHERE ' .
            $db->columnidentifier('call_id') . ' = ?';
    my @params = ($callid);
    my $rows = $xa_db->db_get_all_arrayref($stmt,@params);

    return buildrecords_fromrows($rows,$load_recursive);

}

sub insert_row {

    # Insert a new row; returns the new id (or 1) on success, undef otherwise.
    my $db = &$get_db();
    my $xa_db = shift // $db;

    my ($data,$insert_ignore) = @_;
    check_table();
    if (insert_record($db,$xa_db,__PACKAGE__,$data,$insert_ignore,$insert_unique_fields)) {
        return $xa_db->db_last_insert_id() || 1;
    }
    return undef;

}

sub buildrecords_fromrows {

    # Bless raw rows and (optionally, per $load_recursive) attach the
    # related relation/direction/provider records.
    my ($rows,$load_recursive) = @_;

    my @records = ();
    my $record;

    if (defined $rows and ref $rows eq 'ARRAY') {
        foreach my $row (@$rows) {
            $record = __PACKAGE__->new($row);

            # transformations go here ...
            $record->load_relation($load_recursive,'relation','NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_relation::findby_id_cached',$record->{relation_id},$load_recursive);
            $record->load_relation($load_recursive,'direction','NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_direction::findby_id_cached',$record->{direction_id},$load_recursive);
            $record->load_relation($load_recursive,'provider','NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_provider::findby_id_cached',$record->{provider_id},$load_recursive);

            push @records,$record;
        }
    }

    return \@records;

}

sub gettablename {

    return $tablename;

}

sub settablename {

    $tablename = shift;

}

sub check_table {

    return checktableinfo($get_db,
        __PACKAGE__,$tablename,
        $expected_fieldnames,
        $indexes);

}

1;
|
@ -0,0 +1,145 @@
|
||||
package NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_time_balance_data;
use strict;

## no critic

# DAO for the accounting.cdr_time_balance_data table: per-CDR snapshots of
# contract time balances (before/after values), keyed by provider/direction.

use NGCP::BulkProcessor::Logging qw(
    getlogger
);

use NGCP::BulkProcessor::ConnectorPool qw(
    get_accounting_db
    destroy_dbs
);

use NGCP::BulkProcessor::SqlProcessor qw(
    checktableinfo
    copy_row

    insert_record
);
use NGCP::BulkProcessor::SqlRecord qw();

use NGCP::BulkProcessor::Dao::Trunk::billing::contract_balances qw();
use NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_provider qw();
use NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_direction qw();

require Exporter;
our @ISA = qw(Exporter NGCP::BulkProcessor::SqlRecord);
our @EXPORT_OK = qw(
    gettablename
    settablename
    check_table

    findby_cdrid

    insert_row
);

my $tablename = 'cdr_time_balance_data';
my $get_db = \&get_accounting_db;

my $expected_fieldnames = [
    "cdr_id",
    "provider_id",
    "direction_id",
    "time_balance_id",
    "val_before",
    "val_after",
    "cdr_start_time",
];

my $indexes = {};

my $insert_unique_fields = [];

sub new {

    # Construct a record object, copying the known columns from the given row.
    my $class = shift;
    my $self = NGCP::BulkProcessor::SqlRecord->new($class,$get_db,
        $tablename,$expected_fieldnames,$indexes);

    copy_row($self,shift,$expected_fieldnames);

    return $self;

}

sub findby_cdrid {

    # Load all time balance snapshot rows belonging to one CDR.
    # $xa_db optionally supplies a transaction connection.
    my ($xa_db,$cdrid,$load_recursive) = @_;

    check_table();
    my $db = &$get_db();
    $xa_db //= $db;

    my $stmt = 'SELECT * FROM ' . $db->tableidentifier($tablename) .
        ' WHERE ' . $db->columnidentifier('cdr_id') . ' = ?';
    my $rows = $xa_db->db_get_all_arrayref($stmt,$cdrid);

    return buildrecords_fromrows($rows,$load_recursive);

}

sub insert_row {

    # Insert a new row; returns the new id (or 1) on success, undef otherwise.
    my $db = &$get_db();
    my $xa_db = shift // $db;

    my ($data,$insert_ignore) = @_;
    check_table();
    if (insert_record($db,$xa_db,__PACKAGE__,$data,$insert_ignore,$insert_unique_fields)) {
        return $xa_db->db_last_insert_id() || 1;
    }
    return undef;

}

sub buildrecords_fromrows {

    # Bless raw rows and (optionally, per $load_recursive) attach the
    # related contract_balance/direction/provider records.
    my ($rows,$load_recursive) = @_;

    my @records = ();

    if (defined $rows and ref $rows eq 'ARRAY') {
        foreach my $row (@$rows) {
            my $record = __PACKAGE__->new($row);

            # transformations go here ...
            $record->load_relation($load_recursive,'contract_balance','NGCP::BulkProcessor::Dao::Trunk::billing::contract_balances::findby_id',undef,$record->{time_balance_id},$load_recursive);
            $record->load_relation($load_recursive,'direction','NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_direction::findby_id_cached',$record->{direction_id},$load_recursive);
            $record->load_relation($load_recursive,'provider','NGCP::BulkProcessor::Dao::Trunk::accounting::cdr_provider::findby_id_cached',$record->{provider_id},$load_recursive);

            push(@records,$record);
        }
    }

    return \@records;

}

sub gettablename {

    return $tablename;

}

sub settablename {

    $tablename = shift;

}

sub check_table {

    return checktableinfo($get_db,
        __PACKAGE__,$tablename,
        $expected_fieldnames,
        $indexes);

}

1;
|
@ -0,0 +1,131 @@
|
||||
package NGCP::BulkProcessor::Dao::Trunk::billing::billing_fees_history;
use strict;

## no critic

# Read-only DAO for the billing.billing_fees_history table.

use NGCP::BulkProcessor::Logging qw(
    getlogger
);

use NGCP::BulkProcessor::ConnectorPool qw(
    get_billing_db
    destroy_dbs
);

use NGCP::BulkProcessor::SqlProcessor qw(
    checktableinfo
    copy_row
);
use NGCP::BulkProcessor::SqlRecord qw();

require Exporter;
our @ISA = qw(Exporter NGCP::BulkProcessor::SqlRecord);
our @EXPORT_OK = qw(
    gettablename
    check_table

    findby_id
);

my $tablename = 'billing_fees_history';
my $get_db = \&get_billing_db;

my $expected_fieldnames = [
    'id',
    'bf_id',
    'billing_profile_id',
    'billing_zones_history_id',
    'source',
    'destination',
    'direction',
    'type',
    'onpeak_init_rate',
    'onpeak_init_interval',
    'onpeak_follow_rate',
    'onpeak_follow_interval',
    'offpeak_init_rate',
    'offpeak_init_interval',
    'offpeak_follow_rate',
    'offpeak_follow_interval',
    'onpeak_use_free_time',
    'match_mode',
    'onpeak_extra_rate',
    'onpeak_extra_second',
    'offpeak_extra_rate',
    'offpeak_extra_second',
    'offpeak_use_free_time',
    'aoc_pulse_amount_per_message',
];

my $indexes = {};

my $insert_unique_fields = [];

# match_mode: enum('regex_longest_pattern','regex_longest_match','prefix','exact_destination')

sub new {

    # Construct a record object, copying the known columns from the given row.
    my $class = shift;
    my $self = NGCP::BulkProcessor::SqlRecord->new($class,$get_db,
        $tablename,$expected_fieldnames,$indexes);

    copy_row($self,shift,$expected_fieldnames);

    return $self;

}

sub findby_id {

    # Fetch a single fee history record by primary key; undef if not found.
    my ($id,$load_recursive) = @_;

    check_table();
    my $db = &$get_db();

    my $stmt = 'SELECT * FROM ' . $db->tableidentifier($tablename) .
        ' WHERE ' . $db->columnidentifier('id') . ' = ?';
    my $rows = $db->db_get_all_arrayref($stmt,$id);

    return buildrecords_fromrows($rows,$load_recursive)->[0];

}

sub buildrecords_fromrows {

    # Bless each raw DB row into a record object of this package.
    my ($rows,$load_recursive) = @_;

    my @records = ();

    if (defined $rows and ref $rows eq 'ARRAY') {
        foreach my $row (@$rows) {
            my $record = __PACKAGE__->new($row);

            # transformations go here ...

            push(@records,$record);
        }
    }

    return \@records;

}

sub gettablename {

    return $tablename;

}

sub check_table {

    return checktableinfo($get_db,
        __PACKAGE__,$tablename,
        $expected_fieldnames,
        $indexes);

}

1;
|
@ -0,0 +1,110 @@
|
||||
package NGCP::BulkProcessor::Dao::Trunk::billing::billing_zones_history;
use strict;

## no critic

# Read-only DAO for the billing.billing_zones_history table.

use NGCP::BulkProcessor::Logging qw(
    getlogger
);

use NGCP::BulkProcessor::ConnectorPool qw(
    get_billing_db
    destroy_dbs
);

use NGCP::BulkProcessor::SqlProcessor qw(
    checktableinfo
    copy_row
);
use NGCP::BulkProcessor::SqlRecord qw();

require Exporter;
our @ISA = qw(Exporter NGCP::BulkProcessor::SqlRecord);
our @EXPORT_OK = qw(
    gettablename
    check_table

    findby_id
);

my $tablename = 'billing_zones_history';
my $get_db = \&get_billing_db;

my $expected_fieldnames = [
    'id',
    'bz_id',
    'billing_profile_id',
    'zone',
    'detail',
];

my $indexes = {};

my $insert_unique_fields = [];

sub new {

    # Construct a record object, copying the known columns from the given row.
    my $class = shift;
    my $self = NGCP::BulkProcessor::SqlRecord->new($class,$get_db,
        $tablename,$expected_fieldnames,$indexes);

    copy_row($self,shift,$expected_fieldnames);

    return $self;

}

sub findby_id {

    # Fetch a single zone history record by primary key; undef if not found.
    my ($id,$load_recursive) = @_;

    check_table();
    my $db = &$get_db();

    my $stmt = 'SELECT * FROM ' . $db->tableidentifier($tablename) .
        ' WHERE ' . $db->columnidentifier('id') . ' = ?';
    my $rows = $db->db_get_all_arrayref($stmt,$id);

    return buildrecords_fromrows($rows,$load_recursive)->[0];

}

sub buildrecords_fromrows {

    # Bless each raw DB row into a record object of this package.
    my ($rows,$load_recursive) = @_;

    my @records = ();

    if (defined $rows and ref $rows eq 'ARRAY') {
        foreach my $row (@$rows) {
            my $record = __PACKAGE__->new($row);

            # transformations go here ...

            push(@records,$record);
        }
    }

    return \@records;

}

sub gettablename {

    return $tablename;

}

sub check_table {

    return checktableinfo($get_db,
        __PACKAGE__,$tablename,
        $expected_fieldnames,
        $indexes);

}

1;
|
@ -1,279 +0,0 @@
|
||||
package NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular;
use strict;

## no critic

# SQLite-backed staging table for the tabular CDR export. Column names are
# derived dynamically from the configured $tabular_fields; an 'id' and a
# 'delta' change-tracking column are appended.

use NGCP::BulkProcessor::Projects::ETL::CDR::ProjectConnectorPool qw(
    get_sqlite_db
    destroy_all_dbs
);

use NGCP::BulkProcessor::Projects::ETL::CDR::Settings qw(
    $tabular_fields
    $csv_all_expected_fields
);

use NGCP::BulkProcessor::SqlProcessor qw(
    registertableinfo
    create_targettable
    checktableinfo
    copy_row
    insert_stmt
    transfer_table
);

use NGCP::BulkProcessor::SqlRecord qw();

require Exporter;
our @ISA = qw(Exporter NGCP::BulkProcessor::SqlRecord);
our @EXPORT_OK = qw(
    create_table
    gettablename
    check_table
    getinsertstatement
    getupsertstatement

    get_fieldnames

    update_delta
    findby_delta
    countby_delta

    $deleted_delta
    $updated_delta
    $added_delta

    copy_table
);

my $tablename = 'tabular';
my $get_db = \&get_sqlite_db;

my $fieldnames;
my $expected_fieldnames;
sub get_fieldnames {

    # Compute (once) and return the column name list. With a true argument,
    # return the full expected list including the appended 'id'/'delta'
    # bookkeeping columns; otherwise only the configured data columns.
    my $expected = shift;
    unless (defined $fieldnames and defined $expected_fieldnames) {
        $fieldnames = [ map {
            # a field spec is either a plain path string or a hash with an
            # optional explicit column name; sanitize path syntax to a
            # valid column identifier
            local $_ = (ref $_ ? (exists $_->{colname} ? $_->{colname} : $_->{path}) : $_);
            $_ =~ s/\./_/g;
            $_ =~ s/\[(\d+)\]/_$1/g;
            $_;
        } @$tabular_fields ];
        $expected_fieldnames = [ @$fieldnames ];
        push(@$expected_fieldnames,'id') unless grep { 'id' eq $_; } @$expected_fieldnames;
        push(@$expected_fieldnames,'delta');
    }
    return $fieldnames unless $expected;
    return $expected_fieldnames;

}

my $primarykey_fieldnames = [ 'id' ];
my $indexes = {
    $tablename . '_delta' => [ 'delta(7)' ],
};

our $deleted_delta = 'DELETED';
our $updated_delta = 'UPDATED';
our $added_delta = 'ADDED';

sub new {

    # Construct a record object, copying the expected columns from the row.
    my $class = shift;
    my $self = NGCP::BulkProcessor::SqlRecord->new($class,$get_db,
        $tablename,get_fieldnames(1),$indexes);

    copy_row($self,shift,get_fieldnames(1));

    return $self;

}

sub create_table {

    # (Re)create the staging table; truncates when $truncate is true.
    my ($truncate) = @_;

    my $db = &$get_db();

    registertableinfo($db,__PACKAGE__,$tablename,get_fieldnames(1),$indexes,$primarykey_fieldnames);
    return create_targettable($db,__PACKAGE__,$db,__PACKAGE__,$tablename,$truncate,0,undef);

}

sub findby_delta {

    # Return all records carrying the given delta marker.
    my ($delta,$load_recursive) = @_;

    check_table();
    my $db = &$get_db();
    my $table = $db->tableidentifier($tablename);

    return [] unless defined $delta;

    my $rows = $db->db_get_all_arrayref(
        'SELECT * FROM ' .
        $table .
        ' WHERE ' .
        $db->columnidentifier('delta') . ' = ?'
    , $delta);

    return buildrecords_fromrows($rows,$load_recursive);

}

sub findby_domainusername {

    # Look up a single record by (domain, username); undef if not found.
    my ($domain,$username,$load_recursive) = @_;

    check_table();
    my $db = &$get_db();
    my $table = $db->tableidentifier($tablename);

    return [] unless (defined $domain and defined $username);

    my $rows = $db->db_get_all_arrayref(
        'SELECT * FROM ' . $table .
        ' WHERE ' . $db->columnidentifier('domain') . ' = ?' .
        ' AND ' . $db->columnidentifier('username') . ' = ?'
    , $domain, $username);

    return buildrecords_fromrows($rows,$load_recursive)->[0];

}

sub update_delta {

    # Set the delta marker, either for a single record ($id defined) or
    # for every row; returns the number of affected rows.
    my ($id,$delta) = @_;

    check_table();
    my $db = &$get_db();
    my $table = $db->tableidentifier($tablename);

    my $stmt = 'UPDATE ' . $table . ' SET delta = ?';
    my @params = ();
    push(@params,$delta);
    if (defined $id) {
        $stmt .= ' WHERE ' .
            $db->columnidentifier('id') . ' = ?';
        push(@params, $id);
    }

    return $db->db_do($stmt,@params);

}

sub countby_delta {

    # Count rows by delta marker(s). $deltas may be a scalar, or a hashref
    # mapping an SQL set operator (e.g. 'IN') to value(s).
    my ($deltas) = @_;

    check_table();
    my $db = &$get_db();
    my $table = $db->tableidentifier($tablename);

    my $stmt = 'SELECT COUNT(*) FROM ' . $table . ' WHERE 1=1';
    my @params = ();
    if (defined $deltas and 'HASH' eq ref $deltas) {
        foreach my $in (keys %$deltas) {
            my @values = (defined $deltas->{$in} and 'ARRAY' eq ref $deltas->{$in} ? @{$deltas->{$in}} : ($deltas->{$in}));
            $stmt .= ' AND ' . $db->columnidentifier('delta') . ' ' . $in . ' (' . substr(',?' x scalar @values,1) . ')';
            push(@params,@values);
        }
    } elsif (defined $deltas and length($deltas) > 0) {
        $stmt .= ' AND ' . $db->columnidentifier('delta') . ' = ?';
        push(@params,$deltas);
    }

    return $db->db_get_value($stmt,@params);

}

sub copy_table {

    # Transfer the staging table's content into the target db supplied by
    # $get_target_db. Schema is validated strictly only when the export is
    # configured to require all expected fields.
    my ($get_target_db) = @_;

    if ($csv_all_expected_fields) {
        check_table();
    } else {
        checktableinfo($get_db,
            __PACKAGE__,$tablename,
            get_fieldnames(0),
            $indexes);
    }

    return transfer_table(
        get_db => $get_db,
        class => __PACKAGE__,
        get_target_db => $get_target_db,
        targetclass => __PACKAGE__,
        targettablename => $tablename,
    );

}

sub buildrecords_fromrows {

    # Bless each raw DB row into a record object of this package.
    my ($rows,$load_recursive) = @_;

    my @records = ();

    if (defined $rows and ref $rows eq 'ARRAY') {
        foreach my $row (@$rows) {
            my $record = __PACKAGE__->new($row);

            # transformations go here ...

            push(@records,$record);
        }
    }

    return \@records;

}

sub getinsertstatement {

    # Plain INSERT statement for bulk loading.
    my ($insert_ignore) = @_;
    check_table();
    return insert_stmt($get_db,__PACKAGE__,$insert_ignore);

}

sub getupsertstatement {

    # SQLite upsert: INSERT OR REPLACE, with the 'delta' column derived via
    # a correlated subselect - UPDATED when the id already exists, ADDED
    # otherwise.
    check_table();
    my $db = &$get_db();
    my $table = $db->tableidentifier($tablename);
    my $upsert_stmt = 'INSERT OR REPLACE INTO ' . $table . ' (' .
        join(', ', map { local $_ = $_; $_ = $db->columnidentifier($_); $_; } @{get_fieldnames(1)}) . ')';
    my @values = ();
    foreach my $fieldname (@{get_fieldnames(1)}) {
        if ('delta' eq $fieldname) {
            my $stmt = 'SELECT \'' . $updated_delta . '\' FROM ' . $table . ' WHERE ' .
                $db->columnidentifier('id') . ' = ?';
            push(@values,'COALESCE((' . $stmt . '), \'' . $added_delta . '\')');
        } else {
            push(@values,'?');
        }
    }
    $upsert_stmt .= ' VALUES (' . join(',',@values) . ')';
    return $upsert_stmt;

}

sub gettablename {

    return $tablename;

}

sub check_table {

    return checktableinfo($get_db,
        __PACKAGE__,$tablename,
        get_fieldnames(1),
        $indexes);

}

1;
|
@ -1,490 +0,0 @@
|
||||
package NGCP::BulkProcessor::Projects::ETL::CDR::ExportCDR;
|
||||
use strict;
|
||||
|
||||
## no critic
|
||||
|
||||
use threads::shared qw();
|
||||
|
||||
use Tie::IxHash;
|
||||
|
||||
use NGCP::BulkProcessor::Serialization qw();
|
||||
use Scalar::Util qw(blessed);
|
||||
use MIME::Base64 qw(encode_base64);
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::CDR::Settings qw(
|
||||
$dry
|
||||
$skip_errors
|
||||
|
||||
$export_cdr_multithreading
|
||||
$export_cdr_numofthreads
|
||||
$export_cdr_blocksize
|
||||
|
||||
run_dao_method
|
||||
get_dao_var
|
||||
get_export_filename
|
||||
|
||||
write_export_file
|
||||
$cdr_export_filename_format
|
||||
|
||||
$tabular_fields
|
||||
$load_recursive
|
||||
$tabular_single_row_txn
|
||||
$ignore_tabular_unique
|
||||
$graph_fields
|
||||
$graph_fields_mode
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::Logging qw (
|
||||
getlogger
|
||||
processing_info
|
||||
processing_debug
|
||||
);
|
||||
use NGCP::BulkProcessor::LogError qw(
|
||||
rowprocessingerror
|
||||
rowprocessingwarn
|
||||
fileerror
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::Dao::Trunk::billing::contracts qw();
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular qw();
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::CDR::ProjectConnectorPool qw(
|
||||
get_sqlite_db
|
||||
destroy_all_dbs
|
||||
ping_all_dbs
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::Utils qw(create_uuid threadid timestamp stringtobool trim); #check_ipnet
|
||||
#use NGCP::BulkProcessor::DSSorter qw(sort_by_configs);
|
||||
#use NGCP::BulkProcessor::Table qw(get_rowhash);
|
||||
use NGCP::BulkProcessor::Array qw(array_to_map);
|
||||
use NGCP::BulkProcessor::DSPath qw();
|
||||
|
||||
require Exporter;
|
||||
our @ISA = qw(Exporter);
|
||||
our @EXPORT_OK = qw(
|
||||
export_cdr_graph
|
||||
export_cdr_tabular
|
||||
);
|
||||
|
||||
sub _init_graph_field_map {

    # Split the configured $graph_fields into two lookup structures on the
    # context: an exact-path map and an insertion-ordered map of glob
    # patterns (paths containing '*', compiled to anchored regexes).
    my $context = shift;
    my %exact_map = ();
    my %glob_map = ();
    tie(%glob_map, 'Tie::IxHash'); # preserve configured order for first-match wins
    foreach my $graph_field (@$graph_fields) {
        my ($conf,$path);
        if ('HASH' eq ref $graph_field) {
            $path = $graph_field->{path};
            $conf = $graph_field;
        } else {
            $path = $graph_field;
            $conf = 1;
        }
        if ($path =~ /\*/) {
            # '*' matches a single path segment: quote everything else and
            # turn runs of '*' into [^.]+
            $path = quotemeta($path);
            $path =~ s/(\\\*)+/[^.]+/g;
            $path = '^' . $path . '$';
            $glob_map{$path} = $conf unless exists $glob_map{$path};
        } else {
            $exact_map{$path} = $conf unless exists $exact_map{$path};
        }
    }
    $context->{graph_field_map} = \%exact_map;
    $context->{graph_field_globs} = \%glob_map;
}
|
||||
|
||||
sub export_cdr_graph {

    # Export the full CDR graph (all relations) chunk-wise to the
    # configured export file. Returns (success, warning_count).
    my $static_context = {
    };
    _init_graph_field_map($static_context);
    ($static_context->{export_filename},$static_context->{export_format}) = get_export_filename($cdr_export_filename_format);

    my $result = 1; #_copy_cdr_checks($static_context);

    # close connections before forking/threading workers
    destroy_all_dbs();
    my $warning_count :shared = 0;
    return ($result && run_dao_method('accounting::cdr::process_fromto',
        static_context => $static_context,
        process_code => sub {
            my ($context,$records,$row_offset) = @_;
            ping_all_dbs();
            my @data = ();
            foreach my $record (@$records) {
                next unless _export_cdr_graph_init_context($context,$record);
                push(@data,_get_contract_graph($context));
            }
            write_export_file(\@data,$context->{export_filename},$context->{export_format});
            return 1;
        },
        init_process_context_code => sub {
            my ($context) = @_;
            $context->{error_count} = 0;
            $context->{warning_count} = 0;
        },
        uninit_process_context_code => sub {
            my ($context) = @_;
            destroy_all_dbs();
            {
                # accumulate per-worker warnings into the shared counter
                lock $warning_count;
                $warning_count += $context->{warning_count};
            }
        },
        destroy_reader_dbs_code => \&destroy_all_dbs,
        blocksize => $export_cdr_blocksize,
        multithreading => $export_cdr_multithreading,
        numofthreads => $export_cdr_numofthreads,
    ),$warning_count,);

}
|
||||
|
||||
sub export_cdr_tabular {

    # Export flattened per-subscriber rows into the sqlite staging table.
    # Rows are inserted either one-per-transaction ($tabular_single_row_txn)
    # or in one block per chunk. Returns (success, warning_count).
    my $result = NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::create_table(0);

    my $static_context = {
        upsert => _tabular_rows_reset_delta(),
    };

    # close connections before forking/threading workers
    destroy_all_dbs();
    my $warning_count :shared = 0;
    return ($result && run_dao_method('billing::contracts::process_records',
        static_context => $static_context,
        process_code => sub {
            my ($context,$records,$row_offset) = @_;
            ping_all_dbs();
            my @subscriber_rows = ();
            foreach my $record (@$records) {
                next unless _export_cdr_tabular_init_context($context,$record);
                push(@subscriber_rows, _get_subscriber_rows($context));

                if ($tabular_single_row_txn and (scalar @subscriber_rows) > 0) {
                    while (defined (my $subscriber_row = shift @subscriber_rows)) {
                        if ($skip_errors) {
                            eval { _insert_tabular_rows($context,[$subscriber_row]); };
                            _warn($context,$@) if $@;
                        } else {
                            _insert_tabular_rows($context,[$subscriber_row]);
                        }
                    }
                }
            }

            if (not $tabular_single_row_txn and (scalar @subscriber_rows) > 0) {
                # BUGFIX: this branch called the non-existent
                # insert_tabular_rows(); the sub defined in this package
                # (and used by the single-row branch above) is
                # _insert_tabular_rows().
                if ($skip_errors) {
                    eval { _insert_tabular_rows($context,\@subscriber_rows); };
                    _warn($context,$@) if $@;
                } else {
                    _insert_tabular_rows($context,\@subscriber_rows);
                }
            }

            return 1;
        },
        init_process_context_code => sub {
            my ($context) = @_;
            $context->{db} = &get_sqlite_db();
            $context->{error_count} = 0;
            $context->{warning_count} = 0;
        },
        uninit_process_context_code => sub {
            my ($context) = @_;
            undef $context->{db};
            destroy_all_dbs();
            {
                # accumulate per-worker warnings into the shared counter
                lock $warning_count;
                $warning_count += $context->{warning_count};
            }
        },
        destroy_reader_dbs_code => \&destroy_all_dbs,
        blocksize => $export_cdr_blocksize,
        multithreading => $export_cdr_multithreading,
        numofthreads => $export_cdr_numofthreads,
    ),$warning_count,);

}
|
||||
|
||||
sub _tabular_rows_reset_delta {
    # If the staging table already holds rows, mark them all DELETED so a
    # subsequent upsert flips touched rows back to UPDATED/ADDED. Returns
    # a truthy flag indicating upsert mode.
    my $upsert = 0;
    if (NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::countby_delta() > 0) {
        processing_info(threadid(),'resetting delta of ' .
            NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::update_delta(undef,
                $NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::deleted_delta) .
            ' records',getlogger(__PACKAGE__));
        $upsert |= 1;
    }
    return $upsert;
}
|
||||
|
||||
sub _insert_tabular_rows {
    # Write a block of rows to the staging table in one transaction, using
    # either the upsert or the plain insert statement; rolls back and
    # rethrows on failure.
    my ($context,$subscriber_rows) = @_;
    $context->{db}->db_do_begin(
        ($context->{upsert} ?
            NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::getupsertstatement()
            : NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::getinsertstatement($ignore_tabular_unique)),
    );
    eval {
        $context->{db}->db_do_rowblock($subscriber_rows);
        $context->{db}->db_finish();
    };
    my $err = $@;
    if ($err) {
        # roll back quietly, then propagate the original error
        eval {
            $context->{db}->db_finish(1);
        };
        die($err);
    }

}
|
||||
|
||||
sub _export_cdr_graph_init_context {

    # Prepare the per-record context for the graph export; returns false
    # (skip record) when the contract cannot be loaded.
    my ($context,$record) = @_;

    return 0 unless _load_contract($context,$record);

    return 1;

}
|
||||
|
||||
sub _get_contract_graph {
    # Apply the configured whitelist/blacklist field filtering (exact paths
    # first, then ordered glob patterns) to the loaded contract graph,
    # in place, and return it.
    my ($context) = @_;

    my $dp = NGCP::BulkProcessor::DSPath->new($context->{contract}, {
        filter => sub {
            my $path = shift;
            if ('whitelist' eq $graph_fields_mode) {
                # keep only explicitly included paths
                my $include;
                if (exists $context->{graph_field_map}->{$path}) {
                    $include = $context->{graph_field_map}->{$path};
                } else {
                    foreach my $glob (keys %{$context->{graph_field_globs}}) {
                        if ($path =~ /$glob/) {
                            $include = $context->{graph_field_globs}->{$glob};
                            last;
                        }
                    }
                }
                if ('HASH' eq ref $include) {
                    if (exists $include->{include}) {
                        return $include->{include};
                    }
                    return 1;
                } else {
                    return $include;
                }
            } elsif ('blacklist' eq $graph_fields_mode) {
                # drop listed paths, unless a transform is configured
                my $exclude;
                if (exists $context->{graph_field_map}->{$path}) {
                    $exclude = $context->{graph_field_map}->{$path};
                } else {
                    foreach my $glob (keys %{$context->{graph_field_globs}}) {
                        if ($path =~ /$glob/) {
                            $exclude = $context->{graph_field_globs}->{$glob};
                            last;
                        }
                    }
                }
                if ('HASH' eq ref $exclude) {
                    if (exists $exclude->{exclude}) {
                        return not $exclude->{exclude};
                    } elsif ($exclude->{transform}) {
                        return 1;
                    }
                    return 0;
                } else {
                    return not $exclude;
                }
            }
            # NOTE(review): any other $graph_fields_mode falls through and
            # yields undef here - confirm that is intended.
        },
        transform => sub {
            # identity transform (per-field transforms are hooked elsewhere)
            return shift;
        },
    });

    $dp->filter()->transform();

    return $context->{contract};

}
|
||||
|
||||
sub _export_cdr_tabular_init_context {

    # Prepare the per-record context for the tabular export; returns false
    # (skip record) when the contract cannot be loaded or has no subscribers.
    my ($context,$record) = @_;

    return 0 unless _load_contract($context,$record);

    my $result = 1;
    if (defined $context->{contract}->{voip_subscribers}
        and not scalar @{$context->{contract}->{voip_subscribers}}) {
        _info($context,"contract ID $record->{id} has no subscribers, skipping",1);
        $result = 0;
    }

    return $result;

}
|
||||
|
||||
sub _get_subscriber_rows {

    # Flatten every subscriber of the loaded contract into one export row,
    # following the configured $tabular_fields paths. Returns a list of
    # arrayrefs (one row per subscriber).
    my ($context) = @_;

    my @rows = ();
    foreach my $bill_subs (@{$context->{contract}->{voip_subscribers}}) {
        my @row = ();
        $bill_subs->{contract} = NGCP::BulkProcessor::Dao::Trunk::billing::contracts->new($context->{contract}); #no circular ref
        # group usr preferences by attribute name for path-based access
        ($bill_subs->{provisioning_voip_subscriber}->{voip_usr_preferences}, my $as, my $vs) =
            array_to_map($bill_subs->{provisioning_voip_subscriber}->{voip_usr_preferences},
            sub { return shift->{_attribute}; }, sub { my $p = shift; }, 'group' );
        if (my $prov_subscriber = $bill_subs->{provisioning_voip_subscriber}) {
            # voicemail recordings are binary - base64-encode (no line breaks)
            foreach my $voicemail_user (@{$prov_subscriber->{voicemail_users}}) {
                foreach my $voicemail (@{$voicemail_user->{voicemail_spool}}) {
                    $voicemail->{recording} = encode_base64($voicemail->{recording},'');
                }
            }
        }
        # tolerant path walker: missing keys/indexes just yield undef
        my $dp = NGCP::BulkProcessor::DSPath->new($bill_subs, {
            retrieve_key_from_non_hash => sub {},
            key_does_not_exist => sub {},
            index_does_not_exist => sub {},
        });
        foreach my $tabular_field (@$tabular_fields) {
            my $path;
            my $sep = ',';
            my $transform;
            if ('HASH' eq ref $tabular_field) {
                $path = $tabular_field->{path};
                $sep = $tabular_field->{sep};
                $transform = $tabular_field->{transform};
            } else {
                $path = $tabular_field;
            }
            my $v = $dp->get('.' . $path);
            if ('CODE' eq ref $transform) {
                # per-field transform runs with the export helper symbols
                my $closure = _closure($transform,_get_closure_context($context));
                $v = $closure->($v,$bill_subs);
            }
            if ('ARRAY' eq ref $v) {
                # collapse multi-valued results into a sorted, joined string
                if ('HASH' eq ref $v->[0]
                    or (blessed($v->[0]) and $v->[0]->isa('NGCP::BulkProcessor::SqlRecord'))) {
                    $v = join($sep, sort map { $_->{$tabular_field->{field}}; } @$v);
                } else {
                    $v = join($sep, sort @$v);
                }
            } else {
                $v = '' . ($v // '');
            }
            push(@row,$v);
        }
        # append the implicit id column unless already configured
        push(@row,$bill_subs->{uuid}) unless grep { 'uuid' eq $_; } @{NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::get_fieldnames()};
        if ($context->{upsert}) {
            push(@row,$bill_subs->{uuid}); # parameter for the upsert's delta subselect
        } else {
            push(@row,$NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::added_delta);
        }

        push(@rows,\@row);
    }

    return @rows;

}
|
||||
|
||||
sub _load_contract {

    # Fetch the contract for the current record, with all relations
    # enabled in $load_recursive, and store it in the context.
    # Returns 1 when found, 0 otherwise.
    my ($context,$record) = @_;
    $context->{contract} = run_dao_method('billing::contracts::findby_id', $record->{id}, { %$load_recursive,
        # Closure context makes the logging helpers available to any
        # include/filter/transform code in load.yml.
        _context => _get_closure_context($context),
    });

    return 1 if $context->{contract};
    return 0;

}
|
||||
|
||||
sub _get_closure_context {

    # Bundle the logging helpers together with the caller's context so
    # user-supplied (YAML) code snippets can emit log messages.
    my $context = shift;

    my %symbols = (
        _info  => \&_info,
        _error => \&_error,
        _debug => \&_debug,
        _warn  => \&_warn,
    );
    $symbols{context} = $context;

    return \%symbols;

}
|
||||
|
||||
sub _closure {

    # Wrap a user-provided code ref so that, at call time, every CODE
    # value of the symbol context is installed into the main:: package
    # (making helpers such as _info() callable by name from YAML code),
    # and the context itself is appended to the argument list.
    my ($code,$symbols) = @_;

    return sub {
        while (my ($name,$value) = each %$symbols) {
            next unless 'CODE' eq ref $value;
            no strict "refs"; ## no critic (ProhibitNoStrict)
            *{"main::$name"} = $value;
        }
        return $code->(@_,$symbols);
    };

}
|
||||
|
||||
sub _error {

    # Count and log a row-processing error for the current thread.
    my ($context,$message) = @_;
    $context->{error_count} = $context->{error_count} + 1;
    rowprocessingerror($context->{tid} // threadid(),$message,getlogger(__PACKAGE__));

}
|
||||
|
||||
sub _warn {

    # Count and log a row-processing warning for the current thread.
    my ($context,$message) = @_;
    $context->{warning_count} = $context->{warning_count} + 1;
    rowprocessingwarn($context->{tid} // threadid(),$message,getlogger(__PACKAGE__));

}
|
||||
|
||||
sub _info {

    # Log an informational message; with a true $debug flag the message
    # is demoted to debug level instead.
    my ($context,$message,$debug) = @_;
    if ($debug) {
        processing_debug($context->{tid} // threadid(),$message,getlogger(__PACKAGE__));
    } else {
        processing_info($context->{tid} // threadid(),$message,getlogger(__PACKAGE__));
    }
}
|
||||
|
||||
sub _debug {

    # Unconditionally log at debug level. The trailing $debug parameter
    # is accepted for signature parity with _info() but is ignored here.
    my ($context,$message,$debug) = @_;
    processing_debug($context->{tid} // threadid(),$message,getlogger(__PACKAGE__));

}
|
||||
|
||||
1;
|
@ -1,120 +0,0 @@
|
||||
package NGCP::BulkProcessor::Projects::ETL::CDR::ProjectConnectorPool;
|
||||
use strict;
|
||||
|
||||
## no critic
|
||||
|
||||
use File::Basename;
|
||||
use Cwd;
|
||||
use lib Cwd::abs_path(File::Basename::dirname(__FILE__) . '/../../../');
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::CDR::Settings qw(
|
||||
$csv_dir
|
||||
$sqlite_db_file
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::ConnectorPool qw(
|
||||
get_connectorinstancename
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::SqlConnectors::CSVDB qw();
|
||||
use NGCP::BulkProcessor::SqlConnectors::SQLiteDB qw($staticdbfilemode);
|
||||
|
||||
use NGCP::BulkProcessor::SqlProcessor qw(cleartableinfo);
|
||||
|
||||
require Exporter;
|
||||
our @ISA = qw(Exporter);
|
||||
our @EXPORT_OK = qw(
|
||||
|
||||
get_sqlite_db
|
||||
sqlite_db_tableidentifier
|
||||
|
||||
get_csv_db
|
||||
csv_db_tableidentifier
|
||||
|
||||
destroy_dbs
|
||||
destroy_all_dbs
|
||||
ping_all_dbs
|
||||
|
||||
);
|
||||
|
||||
my $sqlite_dbs = {};
|
||||
my $csv_dbs = {};
|
||||
|
||||
sub get_sqlite_db {

    # Return (and lazily create) the per-instance SQLite connector.
    # On first creation the connection is established unless the caller
    # explicitly passed a false $reconnect; a true $reconnect also
    # forces a fresh db_connect() on an already-cached connector.
    my ($instance_name,$reconnect) = @_;
    my $name = get_connectorinstancename($instance_name);

    if (not defined $sqlite_dbs->{$name}) {
        $sqlite_dbs->{$name} = NGCP::BulkProcessor::SqlConnectors::SQLiteDB->new($instance_name);
        if (not defined $reconnect) {
            $reconnect = 1;
        }
    }
    if ($reconnect) {
        $sqlite_dbs->{$name}->db_connect($staticdbfilemode,$sqlite_db_file);
    }

    return $sqlite_dbs->{$name};

}
|
||||
|
||||
sub sqlite_db_tableidentifier {

    # Build the safe, qualified identifier for a table in the project's
    # SQLite database. Accepts either a connector instance or a code ref
    # that returns one.
    my ($db_or_getter,$tablename) = @_;

    my $target_db = $db_or_getter;
    $target_db = $db_or_getter->() if 'CODE' eq ref $db_or_getter;

    my $identifier = NGCP::BulkProcessor::SqlConnectors::SQLiteDB::get_tableidentifier(
        $tablename,$staticdbfilemode,$sqlite_db_file);
    return $target_db->getsafetablename($identifier);

}
|
||||
|
||||
sub get_csv_db {

    # Return (and lazily create) the per-instance CSV connector.
    # Connection semantics mirror get_sqlite_db(): first creation
    # connects by default; a true $reconnect forces a reconnect.
    my ($instance_name,$reconnect) = @_;
    my $name = get_connectorinstancename($instance_name);
    if (not defined $csv_dbs->{$name}) {
        $csv_dbs->{$name} = NGCP::BulkProcessor::SqlConnectors::CSVDB->new($instance_name);
        if (not defined $reconnect) {
            $reconnect = 1;
        }
    }
    if ($reconnect) {
        $csv_dbs->{$name}->db_connect($csv_dir);
    }
    return $csv_dbs->{$name};

}
|
||||
|
||||
sub csv_db_tableidentifier {

    # Build the safe, qualified identifier for a table in the project's
    # CSV database. Accepts either a connector instance or a code ref
    # that returns one.
    my ($db_or_getter,$tablename) = @_;

    my $target_db = $db_or_getter;
    $target_db = $db_or_getter->() if 'CODE' eq ref $db_or_getter;

    my $identifier = NGCP::BulkProcessor::SqlConnectors::CSVDB::get_tableidentifier(
        $tablename,$csv_dir);
    return $target_db->getsafetablename($identifier);

}
|
||||
|
||||
sub destroy_dbs {

    # Tear down all cached project connectors (sqlite and csv), clearing
    # per-connector table metadata before dropping each instance.
    foreach my $registry ($sqlite_dbs, $csv_dbs) {
        # keys() snapshots the key list, so deleting inside is safe.
        foreach my $name (keys %$registry) {
            cleartableinfo($registry->{$name});
            undef $registry->{$name};
            delete $registry->{$name};
        }
    }

}
|
||||
|
||||
sub destroy_all_dbs() {
    # Destroy project-local connectors first, then the global pool's.
    destroy_dbs();
    NGCP::BulkProcessor::ConnectorPool::destroy_dbs();
}
|
||||
|
||||
sub ping_all_dbs() {
    # Keep global pool connections alive; the project-local sqlite/csv
    # connectors need no ping.
    NGCP::BulkProcessor::ConnectorPool::ping_dbs();
}
|
||||
|
||||
1;
|
@ -1,484 +0,0 @@
|
||||
package NGCP::BulkProcessor::Projects::ETL::CDR::Settings;
|
||||
use strict;
|
||||
|
||||
## no critic
|
||||
|
||||
use threads::shared qw();
|
||||
|
||||
use File::Basename qw(fileparse);
|
||||
use NGCP::BulkProcessor::Serialization qw();
|
||||
use DateTime::TimeZone qw();
|
||||
|
||||
use JSON -support_by_pp, -no_export;
|
||||
*NGCP::BulkProcessor::Serialization::serialize_json = sub {
|
||||
my $input_ref = shift;
|
||||
return JSON::to_json($input_ref, { allow_nonref => 1, allow_blessed => 1, convert_blessed => 1, pretty => 1, as_nonblessed => 1 });
|
||||
};
|
||||
|
||||
use NGCP::BulkProcessor::Globals qw(
|
||||
$working_path
|
||||
$enablemultithreading
|
||||
$cpucount
|
||||
create_path
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::Logging qw(
|
||||
getlogger
|
||||
scriptinfo
|
||||
configurationinfo
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::LogError qw(
|
||||
fileerror
|
||||
filewarn
|
||||
configurationwarn
|
||||
configurationerror
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::LoadConfig qw(
|
||||
split_tuple
|
||||
parse_regexp
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::Utils qw(prompt timestampdigits threadid load_module);
|
||||
|
||||
use NGCP::BulkProcessor::Array qw(contains);
|
||||
|
||||
require Exporter;
|
||||
our @ISA = qw(Exporter);
|
||||
our @EXPORT_OK = qw(
|
||||
update_settings
|
||||
run_dao_method
|
||||
get_dao_var
|
||||
get_export_filename
|
||||
write_export_file
|
||||
write_sql_file
|
||||
|
||||
update_load_recursive
|
||||
$load_yml
|
||||
$load_recursive
|
||||
|
||||
update_tabular_fields
|
||||
$tabular_yml
|
||||
$tabular_fields
|
||||
$ignore_tabular_unique
|
||||
$tabular_single_row_txn
|
||||
$graph_yml
|
||||
$graph_fields
|
||||
$graph_fields_mode
|
||||
update_graph_fields
|
||||
|
||||
$sqlite_db_file
|
||||
$csv_dir
|
||||
|
||||
check_dry
|
||||
|
||||
$output_path
|
||||
$input_path
|
||||
|
||||
$cdr_export_filename_format
|
||||
$cdr_import_filename
|
||||
|
||||
$defaultsettings
|
||||
$defaultconfig
|
||||
|
||||
$dry
|
||||
$skip_errors
|
||||
$force
|
||||
|
||||
$export_cdr_multithreading
|
||||
$export_cdr_numofthreads
|
||||
$export_cdr_blocksize
|
||||
|
||||
$csv_all_expected_fields
|
||||
);
|
||||
#$cf_default_priority
|
||||
#$cf_default_timeout
|
||||
#$cft_default_ringtimeout
|
||||
|
||||
our $defaultconfig = 'config.cfg';
|
||||
our $defaultsettings = 'settings.cfg';
|
||||
|
||||
our $tabular_yml = 'tabular.yml';
|
||||
our $tabular_fields = [];
|
||||
our $ignore_tabular_unique = 0;
|
||||
our $tabular_single_row_txn = 1;
|
||||
|
||||
our $graph_yml = 'graph.yml';
|
||||
our $graph_fields = [];
|
||||
our $graph_fields_mode = 'whitelist';
|
||||
my @graph_fields_modes = qw(whitelist blacklist);
|
||||
|
||||
our $load_yml = 'load.yml';
|
||||
our $load_recursive;
|
||||
|
||||
our $output_path = $working_path . 'output/';
|
||||
our $input_path = $working_path . 'input/';
|
||||
our $csv_dir = 'cdr';
|
||||
|
||||
our $cdr_export_filename_format = undef;
|
||||
|
||||
our $csv_all_expected_fields = 1;
|
||||
|
||||
#our $cdr_import_filename = undef;
|
||||
#our $cdr_import_numofthreads = $cpucount;
|
||||
#our $cdr_import_multithreading = 1;
|
||||
#our $cdr_reseller_name = 'default';
|
||||
#our $cdr_billing_profile_name = 'Default Billing Profile';
|
||||
#our $cdr_domain = undef;
|
||||
#our $cdr_contact_email_format = '%s@example.org';
|
||||
#our $subscriber_contact_email_format = '%s@example.org';
|
||||
#our $split_cdrs = 0;
|
||||
|
||||
#our $subscriber_timezone = undef;
|
||||
#our $contract_timezone = undef;
|
||||
|
||||
#our $subscriber_profile_set_name = undef;
|
||||
#our $subscriber_profile_name = undef;
|
||||
#our $webusername_format = '%1$s';
|
||||
#our $subscriber_externalid_format = undef;
|
||||
|
||||
our $force = 0;
|
||||
our $dry = 0;
|
||||
our $skip_errors = 0;
|
||||
|
||||
my $mr = 'Trunk';
|
||||
my @supported_mr = ('Trunk');
|
||||
|
||||
our $sqlite_db_file = 'sqlite';
|
||||
|
||||
our $export_cdr_multithreading = $enablemultithreading;
|
||||
our $export_cdr_numofthreads = $cpucount;
|
||||
our $export_cdr_blocksize = 1000;
|
||||
|
||||
#our $cf_default_priority = 1;
|
||||
#our $cf_default_timeout = 300;
|
||||
#our $cft_default_ringtimeout = 20;
|
||||
|
||||
#our $rollback_sql_export_filename_format = undef;
|
||||
#our $rollback_sql_stmt_format = undef;
|
||||
|
||||
my $file_lock :shared = undef;
|
||||
|
||||
sub update_settings {

    # Apply the key/value settings file: prepare working paths, resolve
    # the export filename format, db/csv locations, threading options
    # and the YAML definition file names, validating enumerated options
    # along the way. Returns 1 when everything validated, 0 otherwise.
    my ($data,$configfile) = @_;

    if (defined $data) {

        my $result = 1;
        my $regexp_result;

        $result &= _prepare_working_paths(1);

        # Validate the export filename template eagerly so a bad format
        # string fails at configuration time, not mid-export.
        $cdr_export_filename_format = $data->{cdr_export_filename} if exists $data->{cdr_export_filename};
        get_export_filename($data->{cdr_export_filename},$configfile);

        $sqlite_db_file = $data->{sqlite_db_file} if exists $data->{sqlite_db_file};
        $csv_dir = $data->{csv_dir} if exists $data->{csv_dir};

        $dry = $data->{dry} if exists $data->{dry};
        $skip_errors = $data->{skip_errors} if exists $data->{skip_errors};

        $export_cdr_multithreading = $data->{export_cdr_multithreading} if exists $data->{export_cdr_multithreading};
        $export_cdr_numofthreads = _get_numofthreads($cpucount,$data,'export_cdr_numofthreads');
        $export_cdr_blocksize = $data->{export_cdr_blocksize} if exists $data->{export_cdr_blocksize};

        $tabular_yml = $data->{tabular_yml} if exists $data->{tabular_yml};
        $graph_yml = $data->{graph_yml} if exists $data->{graph_yml};
        $graph_fields_mode = $data->{graph_fields_mode} if exists $data->{graph_fields_mode};
        if (not $graph_fields_mode or not contains($graph_fields_mode,\@graph_fields_modes)) {
            configurationerror($configfile,'graph_fields_mode must be one of ' . join(', ', @graph_fields_modes));
            $result = 0;
        }
        $load_yml = $data->{load_yml} if exists $data->{load_yml};
        $tabular_single_row_txn = $data->{tabular_single_row_txn} if exists $data->{tabular_single_row_txn};
        $ignore_tabular_unique = $data->{ignore_tabular_unique} if exists $data->{ignore_tabular_unique};

        $csv_all_expected_fields = $data->{csv_all_expected_fields} if exists $data->{csv_all_expected_fields};

        # schema_version selects the DAO namespace (see run_dao_method).
        $mr = $data->{schema_version};
        if (not defined $mr or not contains($mr,\@supported_mr)) {
            configurationerror($configfile,'schema_version must be one of ' . join(', ', @supported_mr));
            $result = 0;
        }

        return $result;
    }
    return 0;

}
|
||||
|
||||
sub run_dao_method {
    # Dynamically resolve and invoke a DAO function in the configured
    # schema-version namespace, e.g. 'billing::contracts::findby_id'.
    my $method_name = 'NGCP::BulkProcessor::Dao::' . $mr . '::' . shift;
    load_module($method_name);
    no strict 'refs'; ## no critic (ProhibitNoStrict)
    return $method_name->(@_);
}
|
||||
|
||||
sub get_dao_var {
    # Dynamically read a DAO package variable in the configured
    # schema-version namespace; honors list vs scalar calling context.
    my $var_name = 'NGCP::BulkProcessor::Dao::' . $mr . '::' . shift;
    load_module($var_name);
    no strict 'refs'; ## no critic (ProhibitNoStrict)
    return @{$var_name} if wantarray;
    return ${$var_name};
}
|
||||
|
||||
sub _prepare_working_paths {

    # Ensure the input/ and output/ directories below $working_path
    # exist (creating them when $create is true) and update the
    # corresponding package globals with the resolved paths.
    my ($create) = @_;
    my $result = 1;
    my $path_result;

    ($path_result,$input_path) = create_path($working_path . 'input',$input_path,$create,\&fileerror,getlogger(__PACKAGE__));
    $result &= $path_result;
    ($path_result,$output_path) = create_path($working_path . 'output',$output_path,$create,\&fileerror,getlogger(__PACKAGE__));
    $result &= $path_result;

    return $result;

}
|
||||
|
||||
sub _get_numofthreads {

    # Resolve a thread-count setting: use the configured value when
    # present, the supplied default otherwise, capped at the CPU count.
    my ($default_value,$data,$key) = @_;

    my $numofthreads = exists $data->{$key} ? $data->{$key} : $default_value;
    $numofthreads = $cpucount if $numofthreads > $cpucount;
    return $numofthreads;

}
|
||||
|
||||
sub get_export_filename {
    # Expand a sprintf-style filename format (timestamp digits + thread
    # id), anchor relative names under $output_path, remove any stale
    # pre-existing file, and derive the export format from the file
    # extension. Returns ($filename, $format); both undef when no
    # format string was given.
    my ($filename_format,$configfile) = @_;
    my $export_filename;
    my $export_format;
    if ($filename_format) {
        $export_filename = sprintf($filename_format,timestampdigits(),threadid());
        unless ($export_filename =~ /^\//) {
            $export_filename = $output_path . $export_filename;
        }
        # Remove leftovers from a previous run so the writer can append;
        # on failure only warn and drop the filename.
        if (-e $export_filename and (unlink $export_filename) == 0) {
            filewarn('cannot remove ' . $export_filename . ': ' . $!,getlogger(__PACKAGE__));
            $export_filename = undef;
        }
        # NOTE(review): fileparse() still runs when the unlink above
        # cleared $export_filename - confirm undef is acceptable here.
        my ($name,$path,$suffix) = fileparse($export_filename,".json",".yml",".yaml",".xml",".php",".pl",".db",".csv");
        if ($suffix eq '.json') {
            $export_format = $NGCP::BulkProcessor::Serialization::format_json;
        } elsif ($suffix eq '.yml' or $suffix eq '.yaml') {
            $export_format = $NGCP::BulkProcessor::Serialization::format_yaml;
        } elsif ($suffix eq '.xml') {
            $export_format = $NGCP::BulkProcessor::Serialization::format_xml;
        } elsif ($suffix eq '.php') {
            $export_format = $NGCP::BulkProcessor::Serialization::format_php;
        } elsif ($suffix eq '.pl') {
            $export_format = $NGCP::BulkProcessor::Serialization::format_perl;
        } elsif ($suffix eq '.db') {
            $export_format = 'sqlite';
        } elsif ($suffix eq '.csv') {
            $export_format = 'csv';
        } else {
            configurationerror($configfile,"$filename_format: either .json/.yaml/.xml/.php/.pl or .db/.csv export file format required");
        }
    }
    return ($export_filename,$export_format);
}
|
||||
|
||||
sub write_export_file {

    # Serialize $data in the given format and append it to the export
    # file. Array refs are written as a stream of individually
    # serialized objects.
    my ($data,$export_filename,$export_format) = @_;
    if (defined $export_filename) {
        fileerror("invalid extension for output filename $export_filename",getlogger(__PACKAGE__))
            unless contains($export_format,\@NGCP::BulkProcessor::Serialization::formats);
        # "concatenated json" https://en.wikipedia.org/wiki/JSON_streaming
        my $str = '';
        if (ref $data eq 'ARRAY') {
            foreach my $obj (@$data) {
                $str .= NGCP::BulkProcessor::Serialization::serialize($obj,$export_format);
            }
        } else {
            $str = NGCP::BulkProcessor::Serialization::serialize($data,$export_format);
        }
        _write_file($str,$export_filename);
    }

}
|
||||
|
||||
sub write_sql_file {

    # Render data as SQL statements - one sprintf($stmt_format, ...) per
    # item, newline-separated - and append them to the export file.
    # Array-ref items supply the full sprintf argument list; scalar
    # items are formatted as a single argument.
    my ($data,$export_filename,$stmt_format) = @_;
    if (defined $export_filename and $stmt_format) {
        my $str = '';
        if (ref $data eq 'ARRAY') {
            foreach my $obj (@$data) {
                $str .= "\n" if length($str);
                if (ref $obj eq 'ARRAY') {
                    $str .= sprintf($stmt_format,@$obj);
                } else {
                    # Fix: format the current item, not the accumulated
                    # buffer - sprintf($stmt_format,$str) re-embedded all
                    # previously rendered statements into each new one.
                    $str .= sprintf($stmt_format,$obj);
                }
            }
        } else {
            $str = sprintf($stmt_format,$data);
        }
        $str .= "\n";
        _write_file($str,$export_filename);
    }

}
|
||||
|
||||
sub _write_file {

    # Append $str to the export file, serialized under a shared lock so
    # concurrent worker threads do not interleave their output.
    my ($str,$export_filename) = @_;
    if (defined $export_filename) {
        lock $file_lock;
        open(my $fh, '>>', $export_filename) or fileerror('cannot open file ' . $export_filename . ': ' . $!,getlogger(__PACKAGE__));
        # Raw bytes: serialized payloads may contain binary data.
        binmode($fh);
        print $fh $str;
        close $fh;
    }

}
|
||||
|
||||
sub update_tabular_fields {

    # Accept the tabular.yml payload: must be an array ref of field
    # definitions. Returns 1 on success, 0 otherwise.
    my ($data,$configfile) = @_;

    if (defined $data) {

        my $result = 1;

        # NOTE(review): the plain assignment cannot die; the eval guards
        # against future transformations of $data - confirm intent.
        eval {
            $tabular_fields = $data;
        };
        if ($@ or 'ARRAY' ne ref $tabular_fields) {
            $tabular_fields //= [];
            configurationerror($configfile,'invalid tabular fields',getlogger(__PACKAGE__));
            $result = 0;
        }

        return $result;
    }
    return 0;

}
|
||||
|
||||
sub update_graph_fields {

    # Accept the graph.yml payload: must be an array ref of contract
    # field paths (whitelist or blacklist per $graph_fields_mode).
    # Returns 1 on success, 0 otherwise.
    my ($data,$configfile) = @_;

    if (defined $data) {

        my $result = 1;

        # NOTE(review): the plain assignment cannot die; the eval guards
        # against future transformations of $data - confirm intent.
        eval {
            $graph_fields = $data;
        };
        if ($@ or 'ARRAY' ne ref $graph_fields) {
            $graph_fields //= [];
            configurationerror($configfile,'invalid graph fields',getlogger(__PACKAGE__));
            $result = 0;
        }

        return $result;
    }
    return 0;

}
|
||||
|
||||
sub update_load_recursive {

    # Accept the load.yml payload: must be a hash ref mapping relation
    # paths to flags or include/filter/transform code. Returns 1 on
    # success, 0 otherwise.
    my ($data,$configfile) = @_;

    if (defined $data) {

        my $result = 1;

        # NOTE(review): the plain assignment cannot die; the eval guards
        # against future transformations of $data - confirm intent.
        eval {
            $load_recursive = $data;
        };
        if ($@ or 'HASH' ne ref $load_recursive) {
            undef $load_recursive;
            configurationerror($configfile,'invalid load recursive def',getlogger(__PACKAGE__));
            $result = 0;
        }

        return $result;
    }
    return 0;

}
|
||||
|
||||
sub _get_import_filename {

    # Resolve an import filename setting: keep the old value unless the
    # settings hash overrides it, and anchor non-existing relative names
    # under $input_path.
    my ($old_value,$data,$key) = @_;

    my $import_filename = exists $data->{$key} ? $data->{$key} : $old_value;
    if (defined $import_filename and length($import_filename) > 0) {
        unless (-e $import_filename) {
            $import_filename = $input_path . $import_filename;
        }
    }
    return $import_filename;

}
|
||||
|
||||
sub check_dry {

    # Gate destructive runs: in dry mode just announce and proceed; a
    # real run requires either --force or an interactive 'yes'.
    # Returns 1 to proceed, 0 to abort.
    if ($dry) {
        scriptinfo('running in dry mode - NGCP databases will not be modified',getlogger(__PACKAGE__));
        return 1;
    } else {
        scriptinfo('NO DRY MODE - NGCP DATABASES WILL BE MODIFIED!',getlogger(__PACKAGE__));
        if (!$force) {
            if ('yes' eq lc(prompt("Type 'yes' to proceed: "))) {
                return 1;
            } else {
                return 0;
            }
        } else {
            scriptinfo('force option applied',getlogger(__PACKAGE__));
            return 1;
        }
    }

}
|
||||
|
||||
1;
|
@ -1,61 +0,0 @@
|
||||
##general settings:
|
||||
working_path = /var/sipwise
|
||||
cpucount = 4
|
||||
enablemultithreading = 1
|
||||
|
||||
##gearman/service listener config:
|
||||
jobservers = 127.0.0.1:4730
|
||||
|
||||
##NGCP MySQL connectivity - "accounting" db:
|
||||
accounting_host = db01
|
||||
accounting_port = 3306
|
||||
accounting_databasename = accounting
|
||||
accounting_username = root
|
||||
accounting_password =
|
||||
|
||||
##NGCP MySQL connectivity - "billing" db:
|
||||
billing_host = db01
|
||||
billing_port = 3306
|
||||
billing_databasename = billing
|
||||
billing_username = root
|
||||
billing_password =
|
||||
|
||||
##NGCP MySQL connectivity - "provisioning" db:
|
||||
provisioning_host = db01
|
||||
provisioning_port = 3306
|
||||
provisioning_databasename = provisioning
|
||||
provisioning_username = root
|
||||
provisioning_password =
|
||||
|
||||
##NGCP MySQL connectivity - "kamailio" db:
|
||||
kamailio_host = db01
|
||||
kamailio_port = 3306
|
||||
kamailio_databasename = kamailio
|
||||
kamailio_username = root
|
||||
kamailio_password =
|
||||
|
||||
##NGCP MySQL connectivity - default db for distributed transactions (XA) to connect to:
|
||||
xa_host = db01
|
||||
xa_port = 3306
|
||||
xa_databasename = ngcp
|
||||
xa_username = root
|
||||
xa_password =
|
||||
|
||||
##NGCP REST-API connectivity:
|
||||
ngcprestapi_uri = https://127.0.0.1:1443
|
||||
ngcprestapi_username = administrator
|
||||
ngcprestapi_password = administrator
|
||||
ngcprestapi_realm = api_admin_http
|
||||
|
||||
##sending email:
|
||||
emailenable = 0
|
||||
erroremailrecipient =
|
||||
warnemailrecipient =
|
||||
completionemailrecipient = rkrenn@sipwise.com
|
||||
doneemailrecipient =
|
||||
|
||||
##logging:
|
||||
fileloglevel = INFO
|
||||
#DEBUG
|
||||
screenloglevel = INFO
|
||||
emailloglevel = OFF
|
@ -1,61 +0,0 @@
|
||||
##general settings:
|
||||
working_path = /home/rkrenn/temp/customer_exporter
|
||||
cpucount = 4
|
||||
enablemultithreading = 1
|
||||
|
||||
##gearman/service listener config:
|
||||
jobservers = 127.0.0.1:4730
|
||||
|
||||
##NGCP MySQL connectivity - "accounting" db:
|
||||
accounting_host = 192.168.0.178
|
||||
accounting_port = 3306
|
||||
accounting_databasename = accounting
|
||||
accounting_username = root
|
||||
accounting_password =
|
||||
|
||||
##NGCP MySQL connectivity - "billing" db:
|
||||
billing_host = 192.168.0.178
|
||||
billing_port = 3306
|
||||
billing_databasename = billing
|
||||
billing_username = root
|
||||
billing_password =
|
||||
|
||||
##NGCP MySQL connectivity - "provisioning" db:
|
||||
provisioning_host = 192.168.0.178
|
||||
provisioning_port = 3306
|
||||
provisioning_databasename = provisioning
|
||||
provisioning_username = root
|
||||
provisioning_password =
|
||||
|
||||
##NGCP MySQL connectivity - "kamailio" db:
|
||||
kamailio_host = 192.168.0.178
|
||||
kamailio_port = 3306
|
||||
kamailio_databasename = kamailio
|
||||
kamailio_username = root
|
||||
kamailio_password =
|
||||
|
||||
##NGCP MySQL connectivity - default db for distributed transactions (XA) to connect to:
|
||||
xa_host = 192.168.0.178
|
||||
xa_port = 3306
|
||||
xa_databasename = ngcp
|
||||
xa_username = root
|
||||
xa_password =
|
||||
|
||||
##NGCP REST-API connectivity:
|
||||
ngcprestapi_uri = https://127.0.0.1:1443
|
||||
ngcprestapi_username = administrator
|
||||
ngcprestapi_password = administrator
|
||||
ngcprestapi_realm = api_admin_http
|
||||
|
||||
##sending email:
|
||||
emailenable = 0
|
||||
erroremailrecipient =
|
||||
warnemailrecipient =
|
||||
completionemailrecipient = rkrenn@sipwise.com
|
||||
doneemailrecipient =
|
||||
|
||||
##logging:
|
||||
fileloglevel = INFO
|
||||
#DEBUG
|
||||
screenloglevel = INFO
|
||||
emailloglevel = OFF
|
@ -1,5 +0,0 @@
|
||||
# graph.yml: whitelist/blacklist of *contract* fields to export to .json/.yaml/.xml/...
|
||||
|
||||
- id
|
||||
- voip_subscribers*.provisioning_voip_subscriber.voip_usr_preferences*.attribute.attribute
|
||||
- voip_subscribers*.provisioning_voip_subscriber.voip_usr_preferences*.value
|
@ -1,44 +0,0 @@
|
||||
# load.yml: define which *contract* relations to fetch from db.
|
||||
|
||||
#contracts.voip_subscribers: 1
|
||||
contracts.voip_subscribers:
|
||||
include: !!perl/code |
|
||||
{
|
||||
my ($contract,$context) = @_;
|
||||
#return 0 if $contract->{status} eq 'terminated';
|
||||
return 1;
|
||||
}
|
||||
|
||||
filter: !!perl/code |
|
||||
{
|
||||
my ($bill_subs,$context) = @_;
|
||||
#_debug($context,"skipping terminated subscriber $bill_subs->{username}") if $bill_subs->{status} eq 'terminated';
|
||||
#return 0 if $bill_subs->{status} eq 'terminated';
|
||||
return 1;
|
||||
}
|
||||
|
||||
transform: !!perl/code |
|
||||
{
|
||||
my ($bill_subs,$context) = @_;
|
||||
return $bill_subs;
|
||||
}
|
||||
|
||||
contracts.contact: 1
|
||||
contracts.voip_subscribers.primary_number: 1
|
||||
contracts.voip_subscribers.provisioning_voip_subscriber: 1
|
||||
contracts.voip_subscribers.provisioning_voip_subscriber.voip_dbaliases: 1
|
||||
contracts.voip_subscribers.provisioning_voip_subscriber.voip_usr_preferences: 1
|
||||
contracts.voip_subscribers.provisioning_voip_subscriber.voip_usr_preferences.attribute: 1
|
||||
contracts.voip_subscribers.provisioning_voip_subscriber.voip_usr_preferences.allowed_ips: 1
|
||||
contracts.voip_subscribers.provisioning_voip_subscriber.voip_usr_preferences.ncos: 1
|
||||
contracts.voip_subscribers.provisioning_voip_subscriber.voip_usr_preferences.cf_mapping: 1
|
||||
contracts.voip_subscribers.provisioning_voip_subscriber.voip_usr_preferences.cf_mapping.destinations: 1
|
||||
contracts.voip_subscribers.provisioning_voip_subscriber.voicemail_users: 1
|
||||
#contracts.voip_subscribers.provisioning_voip_subscriber.voicemail_users.voicemail_spool: 1
|
||||
contracts.voip_subscribers.provisioning_voip_subscriber.voip_fax_preferences: 1
|
||||
contracts.voip_subscribers.provisioning_voip_subscriber.voip_fax_destinations:
|
||||
transform: !!perl/code |
|
||||
{
|
||||
my ($fax_destinations,$context) = @_;
|
||||
return [ map { $_->{destination} . ' (' . $_->{filetype} . ')'; } @$fax_destinations ];
|
||||
}
|
@ -1,319 +0,0 @@
|
||||
use strict;
|
||||
|
||||
## no critic
|
||||
|
||||
our $VERSION = "0.0";
|
||||
|
||||
use File::Basename;
|
||||
use Cwd;
|
||||
use lib Cwd::abs_path(File::Basename::dirname(__FILE__) . '/../../../../../');
|
||||
|
||||
use Getopt::Long qw(GetOptions);
|
||||
use Fcntl qw(LOCK_EX LOCK_NB);
|
||||
|
||||
use NGCP::BulkProcessor::Globals qw();
|
||||
use NGCP::BulkProcessor::Projects::ETL::CDR::Settings qw(
|
||||
update_settings
|
||||
update_tabular_fields
|
||||
update_graph_fields
|
||||
$tabular_yml
|
||||
$graph_yml
|
||||
|
||||
update_load_recursive
|
||||
get_export_filename
|
||||
$cdr_export_filename_format
|
||||
$load_yml
|
||||
|
||||
check_dry
|
||||
$output_path
|
||||
$defaultsettings
|
||||
$defaultconfig
|
||||
$dry
|
||||
$skip_errors
|
||||
$force
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::Logging qw(
|
||||
init_log
|
||||
getlogger
|
||||
$attachmentlogfile
|
||||
scriptinfo
|
||||
cleanuplogfiles
|
||||
$currentlogfile
|
||||
);
|
||||
use NGCP::BulkProcessor::LogError qw (
|
||||
completion
|
||||
done
|
||||
scriptwarn
|
||||
scripterror
|
||||
filewarn
|
||||
fileerror
|
||||
);
|
||||
use NGCP::BulkProcessor::LoadConfig qw(
|
||||
load_config
|
||||
$SIMPLE_CONFIG_TYPE
|
||||
$YAML_CONFIG_TYPE
|
||||
$ANY_CONFIG_TYPE
|
||||
);
|
||||
use NGCP::BulkProcessor::Array qw(removeduplicates);
|
||||
use NGCP::BulkProcessor::Utils qw(getscriptpath prompt cleanupdir);
|
||||
use NGCP::BulkProcessor::Mail qw(
|
||||
cleanupmsgfiles
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::SqlConnectors::CSVDB qw(cleanupcvsdirs);
|
||||
use NGCP::BulkProcessor::SqlConnectors::SQLiteDB qw(cleanupdbfiles);
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::CDR::ProjectConnectorPool qw(destroy_all_dbs get_csv_db get_sqlite_db);
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular qw();
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::CDR::ExportCDR qw(
|
||||
export_cdr_graph
|
||||
export_cdr_tabular
|
||||
);
|
||||
#use NGCP::BulkProcessor::Projects::ETL::Cdr::ImportCdr qw(
|
||||
# import_cdr_json
|
||||
#);
|
||||
|
||||
scripterror(getscriptpath() . ' already running',getlogger(getscriptpath())) unless flock DATA, LOCK_EX | LOCK_NB;
|
||||
|
||||
my @TASK_OPTS = ();
|
||||
|
||||
my $tasks = [];
|
||||
|
||||
my $cleanup_task_opt = 'cleanup';
|
||||
push(@TASK_OPTS,$cleanup_task_opt);
|
||||
|
||||
my $cleanup_all_task_opt = 'cleanup_all';
|
||||
push(@TASK_OPTS,$cleanup_all_task_opt);
|
||||
|
||||
my $export_cdr_graph_task_opt = 'export_cdr_graph';
|
||||
push(@TASK_OPTS,$export_cdr_graph_task_opt);
|
||||
|
||||
my $export_cdr_tabular_task_opt = 'export_cdr_tabular';
|
||||
push(@TASK_OPTS,$export_cdr_tabular_task_opt);
|
||||
|
||||
#my $import_cdr_json_task_opt = 'import_cdr_json';
|
||||
#push(@TASK_OPTS,$import_cdr_json_task_opt);
|
||||
|
||||
if (init()) {
|
||||
main();
|
||||
exit(0);
|
||||
} else {
|
||||
exit(1);
|
||||
}
|
||||
|
||||
sub init {

    # Parse command-line options and load the layered configuration:
    # the main config file, the settings file, and the three YAML
    # definitions (tabular/graph/load). Logging is initialized right
    # after the main config so subsequent load errors are logged.
    # Returns a true result only if every configuration file loaded.
    my $configfile = $defaultconfig;
    my $settingsfile = $defaultsettings;

    return 0 unless GetOptions(
        "config=s" => \$configfile,
        "settings=s" => \$settingsfile,
        "task=s" => $tasks,           # may be given multiple times; collected into the arrayref
        "dry" => \$dry,
        "skip-errors" => \$skip_errors,
        "force" => \$force,
    );

    # drop repeated --task options (second arg: case-insensitive compare)
    $tasks = removeduplicates($tasks,1);

    my $result = load_config($configfile);
    init_log();
    $result &= load_config($settingsfile,\&update_settings,$SIMPLE_CONFIG_TYPE);
    $result &= load_config($tabular_yml,\&update_tabular_fields,$YAML_CONFIG_TYPE);
    $result &= load_config($graph_yml,\&update_graph_fields,$YAML_CONFIG_TYPE);
    $result &= load_config($load_yml,\&update_load_recursive,$YAML_CONFIG_TYPE);
    return $result;

}
|
||||
|
||||
sub main() {

    # Dispatch every task requested via --task, in command-line order.
    # Collects human-readable result messages and log attachments, then
    # sends either a "completion" or a "done" notification depending on
    # whether any data-producing (export) task was attempted.
    # Returns the overall success flag.
    my @messages = ();
    my @attachmentfiles = ();
    my $result = 1;        # and-ed with each task's individual result
    my $completion = 0;    # set once an export task was attempted

    if (defined $tasks and 'ARRAY' eq ref $tasks and (scalar @$tasks) > 0) {
        scriptinfo('skip-errors: processing won\'t stop upon errors',getlogger(__PACKAGE__)) if $skip_errors;
        foreach my $task (@$tasks) {

            # task names match case-insensitively; taskinfo() logs the
            # start/skip decision and returns whether to actually run
            if (lc($cleanup_task_opt) eq lc($task)) {
                $result &= cleanup_task(\@messages,0) if taskinfo($cleanup_task_opt,$result);
            } elsif (lc($cleanup_all_task_opt) eq lc($task)) {
                $result &= cleanup_task(\@messages,1) if taskinfo($cleanup_all_task_opt,$result);

            } elsif (lc($export_cdr_graph_task_opt) eq lc($task)) {
                $result &= export_cdr_graph_task(\@messages) if taskinfo($export_cdr_graph_task_opt,$result);
                $completion |= 1;
            } elsif (lc($export_cdr_tabular_task_opt) eq lc($task)) {
                $result &= export_cdr_tabular_task(\@messages) if taskinfo($export_cdr_tabular_task_opt,$result);
                $completion |= 1;
            #} elsif (lc($import_cdr_json_task_opt) eq lc($task)) {
            #    if (taskinfo($import_cdr_json_task_opt,$result,1)) {
            #        next unless check_dry();
            #        $result &= import_cdr_json_task(\@messages);
            #        $completion |= 1;
            #    }

            } else {
                # unknown task: fail and stop processing further tasks
                $result = 0;
                scripterror("unknown task option '" . $task . "', must be one of " . join(', ',@TASK_OPTS),getlogger(getscriptpath()));
                last;
            }
        }
    } else {
        $result = 0;
        scripterror('at least one task option is required. supported tasks: ' . join(', ',@TASK_OPTS),getlogger(getscriptpath()));
    }

    push(@attachmentfiles,$attachmentlogfile);
    if ($completion) {
        completion(join("\n\n",@messages),\@attachmentfiles,getlogger(getscriptpath()));
    } else {
        done(join("\n\n",@messages),\@attachmentfiles,getlogger(getscriptpath()));
    }

    return $result;
}
|
||||
|
||||
sub taskinfo {

    # Log whether a task is about to start or is being skipped because
    # of earlier failures; returns the (unchanged) overall result flag,
    # which callers use as the "should this task run" decision.
    my ($task, $result) = @_;

    my $message;
    if ($result) {
        $message = "starting task: '$task'";
    } else {
        $message = "skipping task '$task' due to previous problems";
    }
    scriptinfo($message, getlogger(getscriptpath()));

    return $result;

}
|
||||
|
||||
sub cleanup_task {

    # Remove temporary working files: csv dirs, sqlite db files, log
    # files, and mail message files. When $clean_generated is true the
    # generated output folder is wiped too - this is destructive, so it
    # requires $force or an interactive 'yes' confirmation.
    # Appends a summary line to @$messages; returns 1 on success, 0
    # otherwise.
    my ($messages,$clean_generated) = @_;
    my $result = 0;
    my $err;
    if (!$clean_generated or $force or 'yes' eq lc(prompt("Type 'yes' to proceed: "))) {
        eval {
            cleanupcvsdirs();
            cleanupdbfiles();
            cleanuplogfiles(\&fileerror,\&filewarn,($currentlogfile,$attachmentlogfile));
            cleanupmsgfiles(\&fileerror,\&filewarn);
            cleanupdir($output_path,1,\&filewarn,getlogger(getscriptpath())) if $clean_generated;
            $result = 1;
        };
        # capture $@ immediately - it is a global that may be stale (or
        # clobbered later) by the time the outcome is inspected below;
        # previously a leftover $@ from earlier code could be reported
        # even when the eval above never ran (prompt declined).
        $err = $@;
    }
    if ($err or !$result) {
        push(@$messages,'working directory cleanup INCOMPLETE');
        return 0;
    } else {
        push(@$messages,'working directory folders cleaned up');
        return 1;
    }
}
|
||||
|
||||
sub export_cdr_graph_task {

    # Run the CDR graph export, collecting its warning count; append a
    # one-line summary to @$messages and always tear down the pooled
    # database connections before returning the export result.
    my ($messages) = @_;

    my ($result, $warning_count) = (0, 0);
    eval {
        ($result, $warning_count) = export_cdr_graph();
    };
    my $err = $@;

    my $stats = ": $warning_count warnings";
    # NOTE: record-count statistics could be appended to $stats here,
    # analogous to the tabular export task.

    my $outcome = ($err or not $result)
        ? "exporting cdr (graph) INCOMPLETE$stats"
        : "exporting cdr (graph) completed$stats";
    push(@$messages, $outcome);

    destroy_all_dbs();
    return $result;

}
|
||||
|
||||
sub export_cdr_tabular_task {

    # Run the CDR tabular export, gather per-delta record statistics,
    # and copy the resulting table to the configured output file
    # (sqlite db file or csv, decided by the export filename extension).
    # Appends a summary to @$messages; always tears down pooled db
    # connections. Returns the export result flag.
    my ($messages) = @_;
    my ($result,$warning_count) = (0,0);
    eval {
        ($result,$warning_count) = export_cdr_tabular();
    };
    # capture $@ before the stats eval below clobbers it
    my $err = $@;
    my $stats = ": $warning_count warnings";
    eval {
        $stats .= "\n total subscriber records: " .
            NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::countby_delta() . ' rows';
        my $added_count = NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::countby_delta(
            $NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::added_delta
        );
        $stats .= "\n new: $added_count rows";
        my $existing_count = NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::countby_delta(
            $NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::updated_delta
        );
        $stats .= "\n existing: $existing_count rows";
        my $deleted_count = NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::countby_delta(
            $NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::deleted_delta
        );
        $stats .= "\n removed: $deleted_count rows";
        my ($export_filename,$export_format) = get_export_filename($cdr_export_filename_format);
        if ('sqlite' eq $export_format) {
            get_sqlite_db()->copydbfile($export_filename);
        } elsif ('csv' eq $export_format) {
            NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::copy_table(\&get_csv_db);
            get_csv_db()->copytablefile(NGCP::BulkProcessor::Projects::ETL::CDR::Dao::Tabular::gettablename(),$export_filename);
        } else {
            # double quotes so the filename is actually interpolated
            # (was single-quoted, printing the literal '$export_filename')
            push(@$messages,"invalid extension for output filename $export_filename");
        }
    };
    if ($err or !$result) {
        push(@$messages,"exporting cdr (tabular) INCOMPLETE$stats");
    } else {
        push(@$messages,"exporting cdr (tabular) completed$stats");
    }
    destroy_all_dbs();
    return $result;

}
|
||||
|
||||
#sub import_cdr_json_task {
|
||||
#
|
||||
# my ($messages) = @_;
|
||||
# my ($result,$warning_count,$contract_read_count,$subscriber_read_count,$contract_created_count,$subscriber_created_count,$contract_failed_count,$subscriber_failed_count) = (0,0,0,0,0,0,0,0);
|
||||
# eval {
|
||||
# ($result,$warning_count,$contract_read_count,$subscriber_read_count,$contract_created_count,$subscriber_created_count,$contract_failed_count,$subscriber_failed_count) = import_cdr_json();
|
||||
# };
|
||||
# my $err = $@;
|
||||
# my $stats = ": $warning_count warnings";
|
||||
# eval {
|
||||
# $stats .= "\n contracts read: " . $contract_read_count;
|
||||
# $stats .= "\n contracts created: " . $contract_created_count;
|
||||
# $stats .= "\n contracts failed: " . $contract_failed_count;
|
||||
# $stats .= "\n subscribers read: " . $subscriber_read_count;
|
||||
# $stats .= "\n subscribers created: " . $subscriber_created_count;
|
||||
# $stats .= "\n subscribers failed: " . $subscriber_failed_count;
|
||||
# };
|
||||
# if ($err or !$result) {
|
||||
# push(@$messages,"importing cdr (json) INCOMPLETE$stats");
|
||||
# } else {
|
||||
# push(@$messages,"importing cdr (json) completed$stats");
|
||||
# }
|
||||
# destroy_all_dbs();
|
||||
# return $result;
|
||||
#
|
||||
#}
|
||||
|
||||
__DATA__
|
||||
This exists to allow the locking code at the beginning of the file to work.
|
||||
DO NOT REMOVE THESE LINES!
|
@ -1,58 +0,0 @@
|
||||
#dry=0
|
||||
#skip_errors=0
|
||||
|
||||
schema_version = Trunk
|
||||
|
||||
export_cdr_multithreading = 1
|
||||
export_cdr_numofthreads = 4
|
||||
export_cdr_blocksize = 1000
|
||||
|
||||
cdr_export_filename=cdr_%s.csv
|
||||
|
||||
load_yml = load.yml
|
||||
tabular_yml = tabular.yml
|
||||
graph_yml = graph.yml
|
||||
graph_fields_mode = whitelist
|
||||
|
||||
csv_all_expected_fields = 0
|
||||
|
||||
sqlite_db_file = sqlite
|
||||
csv_dir = cdr
|
||||
tabular_single_row_txn = 1
|
||||
ignore_tabular_unique = 0
|
||||
|
||||
#cdr_import_filename=cdr_20210216173615.json
|
||||
#split_cdr = 1
|
||||
#cdr_import_multithreading = 1
|
||||
#cdr_import_numofthreads = 4
|
||||
#cdr_reseller_name = default
|
||||
#cdr_billing_profile_name = Default Billing Profile
|
||||
#cdr_domain = test1610072315.example.org
|
||||
#cdr_contact_email_format = DN0%2$s%3$s@example.org
|
||||
#cdr_timezone = Europe/Vienna
|
||||
#subscriber_profile_set_name = subscriber_profile_1_set_65261
|
||||
#subscriber_profile_name = subscriber_profile_1_65261
|
||||
## sip username as webusername:
|
||||
##webusername_format = %1$s
|
||||
## webusername = cc+ac+sn:
|
||||
##webusername_format = %2$s%3$s%4$s
|
||||
## webusername = 0+ac+sn:
|
||||
#webusername_format = 0%3$s%4$s
|
||||
## sip username as external_id:
|
||||
##subscriber_externalid_format = %1$s
|
||||
## external_id = cc+ac+sn:
|
||||
##subscriber_externalid_format = %2$s%3$s%4$s
|
||||
## external_id = 0+ac+sn:
|
||||
#subscriber_externalid_format = 0%3$s%4$s
|
||||
## subscriber contact will be created, only if one of below is set.
|
||||
#subscriber_contact_email_format = DN0%2$s%3$s@domain.org
|
||||
#subscriber_timezone = Europe/Vienna
|
||||
|
||||
#cf_default_priority: 1
|
||||
#cf_default_timeout: 300
|
||||
#cft_default_ringtimeout: 20
|
||||
|
||||
##write sql files for legacy db to set/unset the is_external pref of migrated subscribers:
|
||||
#
|
||||
#rollback_sql_export_filename_format = delete_subscribers_%s.sql
|
||||
#rollback_sql_stmt_format = start transaction;call billing.remove_subscriber("%1$s",%2$s);commit;
|
@ -1,70 +0,0 @@
|
||||
# tabular.yml: define which *subscriber* columns to add to tabular (.db/.csv) exports.
|
||||
|
||||
- path: contract.id
|
||||
transform: !!perl/code |
|
||||
{
|
||||
my ($id,$bill_subs) = @_;
|
||||
return $id;
|
||||
}
|
||||
|
||||
- path: primary_number.cc
|
||||
- path: primary_number.ac
|
||||
- path: primary_number.sn
|
||||
- path: provisioning_voip_subscriber.voicemail_users[0].attach
|
||||
- path: provisioning_voip_subscriber.voicemail_users[0].delete
|
||||
- path: provisioning_voip_subscriber.voicemail_users[0].email
|
||||
- path: provisioning_voip_subscriber.voicemail_users[0].password
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.allowed_clis
|
||||
sep: ','
|
||||
field: 'value'
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.allowed_ips_grp[0].allowed_ips
|
||||
sep: ','
|
||||
field: 'ipnet'
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.block_out_list
|
||||
sep: ','
|
||||
field: 'value'
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.block_out_mode[0].value
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.block_in_list
|
||||
sep: ','
|
||||
field: 'value'
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.block_in_mode[0].value
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.adm_block_in_list
|
||||
sep: ','
|
||||
field: 'value'
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.adm_block_in_mode[0].value
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.adm_block_out_list
|
||||
sep: ','
|
||||
field: 'value'
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.adm_block_out_mode[0].value
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.ncos_id[0].ncos.level
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.adm_ncos_id[0].ncos.level
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.cfb[0].cf_mapping.destinations
|
||||
sep: ','
|
||||
field: 'destination'
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.cfna[0].cf_mapping.destinations
|
||||
sep: ','
|
||||
field: 'destination'
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.cfo[0].cf_mapping.destinations
|
||||
sep: ','
|
||||
field: 'destination'
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.cfr[0].cf_mapping.destinations
|
||||
sep: ','
|
||||
field: 'destination'
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.cfs[0].cf_mapping.destinations
|
||||
sep: ','
|
||||
field: 'destination'
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.cft[0].cf_mapping.destinations
|
||||
sep: ','
|
||||
field: 'destination'
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.cfu[0].cf_mapping.destinations
|
||||
sep: ','
|
||||
field: 'destination'
|
||||
- path: provisioning_voip_subscriber.voip_fax_preferences.active
|
||||
- path: provisioning_voip_subscriber.voip_fax_preferences.ecm
|
||||
- path: provisioning_voip_subscriber.voip_fax_preferences.name
|
||||
- path: provisioning_voip_subscriber.voip_fax_preferences.t38
|
||||
- path: provisioning_voip_subscriber.voip_fax_destinations
|
||||
sep: ','
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.force_inbound_calls_to_peer[0].value
|
||||
- path: provisioning_voip_subscriber.voip_usr_preferences.lnp_for_local_sub[0].value
|
||||
|
@ -1,157 +0,0 @@
|
||||
package NGCP::BulkProcessor::Projects::ETL::EDR::Dao::PeriodEvents;
|
||||
use strict;
|
||||
|
||||
## no critic
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::EDR::ProjectConnectorPool qw(
|
||||
get_sqlite_db
|
||||
destroy_all_dbs
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::SqlProcessor qw(
|
||||
registertableinfo
|
||||
create_targettable
|
||||
checktableinfo
|
||||
copy_row
|
||||
insert_stmt
|
||||
transfer_table
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::SqlRecord qw();
|
||||
|
||||
require Exporter;
|
||||
our @ISA = qw(Exporter NGCP::BulkProcessor::SqlRecord);
|
||||
our @EXPORT_OK = qw(
|
||||
create_table
|
||||
gettablename
|
||||
check_table
|
||||
getinsertstatement
|
||||
|
||||
copy_table
|
||||
);
|
||||
|
||||
my $tablename = 'period_events';
|
||||
my $get_db = \&get_sqlite_db;
|
||||
|
||||
my $fieldnames;
|
||||
my $expected_fieldnames = [
|
||||
'subscriber_id',
|
||||
'profile_id',
|
||||
'start_profile',
|
||||
'update_profile',
|
||||
'stop_profile',
|
||||
];
|
||||
|
||||
my $primarykey_fieldnames = [];
|
||||
my $indexes = {
|
||||
$tablename . '_suscriber_id' => [ 'subscriber_id(11)' ],
|
||||
};
|
||||
|
||||
sub new {

    # Construct a period_events record bound to the SQLite connector.
    # The remaining argument (after the class) is a row whose values are
    # copied into the record's fields via copy_row().
    my $class = shift;
    my $self = NGCP::BulkProcessor::SqlRecord->new($class,$get_db,
        $tablename,$expected_fieldnames,$indexes);

    copy_row($self,shift,$expected_fieldnames);

    return $self;

}
|
||||
|
||||
sub create_table {

    # Register this package's table metadata and (re)create the
    # period_events target table; $truncate clears any existing rows.
    my ($truncate) = @_;

    my $db = &$get_db();
    registertableinfo($db, __PACKAGE__, $tablename, $expected_fieldnames,
        $indexes, $primarykey_fieldnames);

    return create_targettable($db, __PACKAGE__, $db, __PACKAGE__,
        $tablename, $truncate, 1, undef);

}
|
||||
|
||||
sub findby_domainusername {

    # Fetch the first record matching the given domain/username pair.
    # NOTE(review): this table's expected fieldnames (subscriber_id,
    # profile_id, start/update/stop_profile) do not include 'domain' or
    # 'username' - this query looks like a copy-paste leftover from a
    # subscriber DAO; confirm against callers before relying on it.
    # NOTE(review): return types are inconsistent - the early return
    # yields an empty arrayref while the normal path yields a single
    # record object or undef.
    my ($domain,$username,$load_recursive) = @_;

    check_table();
    my $db = &$get_db();
    my $table = $db->tableidentifier($tablename);

    return [] unless (defined $domain and defined $username);

    my $rows = $db->db_get_all_arrayref(
        'SELECT * FROM ' . $table .
        ' WHERE ' . $db->columnidentifier('domain') . ' = ?' .
        ' AND ' . $db->columnidentifier('username') . ' = ?'
        , $domain, $username);

    return buildrecords_fromrows($rows,$load_recursive)->[0];

}
|
||||
|
||||
sub copy_table {

    # Stream the period_events table into the connector produced by
    # $get_target_db, creating an identically named target table.
    my ($get_target_db) = @_;

    check_table();

    my %transfer_args = (
        get_db          => $get_db,
        class           => __PACKAGE__,
        get_target_db   => $get_target_db,
        targetclass     => __PACKAGE__,
        targettablename => $tablename,
    );
    return transfer_table(%transfer_args);

}
|
||||
|
||||
sub buildrecords_fromrows {

    # Inflate raw result rows into record objects. Returns an arrayref;
    # empty when $rows is undef or not a plain arrayref.
    my ($rows, $load_recursive) = @_;

    my @records = ();
    if (defined $rows and ref $rows eq 'ARRAY') {
        for my $row (@$rows) {
            my $record = __PACKAGE__->new($row);

            # transformations go here ...

            push(@records, $record);
        }
    }

    return \@records;

}
|
||||
|
||||
sub getinsertstatement {

    # Build the INSERT statement for this table; $insert_ignore selects
    # the "insert or ignore" variant for duplicate rows.
    my ($insert_ignore) = @_;

    check_table();
    return insert_stmt($get_db, __PACKAGE__, $insert_ignore);

}
|
||||
|
||||
sub gettablename {

    # Accessor for this DAO's table name.
    return $tablename;

}
|
||||
|
||||
sub check_table {

    # Verify the live table matches the expected fieldnames and indexes.
    return checktableinfo(
        $get_db,
        __PACKAGE__,
        $tablename,
        $expected_fieldnames,
        $indexes,
    );

}
|
||||
|
||||
1;
|
@ -1,271 +0,0 @@
|
||||
package NGCP::BulkProcessor::Projects::ETL::EDR::ExportEvents;
|
||||
use strict;
|
||||
|
||||
## no critic
|
||||
|
||||
use threads::shared qw();
|
||||
|
||||
use Tie::IxHash;
|
||||
|
||||
#use NGCP::BulkProcessor::Serialization qw();
|
||||
#use Scalar::Util qw(blessed);
|
||||
#use MIME::Base64 qw(encode_base64);
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::EDR::Settings qw(
|
||||
$dry
|
||||
$skip_errors
|
||||
|
||||
$export_subscriber_profiles_multithreading
|
||||
$export_subscriber_profiles_numofthreads
|
||||
$export_subscriber_profiles_blocksize
|
||||
$export_subscriber_profiles_joins
|
||||
$export_subscriber_profiles_conditions
|
||||
$export_subscriber_profiles_limit
|
||||
|
||||
$period_events_single_row_txn
|
||||
$ignore_period_events_unique
|
||||
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::Logging qw (
|
||||
getlogger
|
||||
processing_info
|
||||
processing_debug
|
||||
);
|
||||
use NGCP::BulkProcessor::LogError qw(
|
||||
rowprocessingerror
|
||||
rowprocessingwarn
|
||||
fileerror
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::Dao::Trunk::accounting::events qw();
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::EDR::Dao::PeriodEvents qw();
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::EDR::ProjectConnectorPool qw(
|
||||
get_sqlite_db
|
||||
destroy_all_dbs
|
||||
ping_all_dbs
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::Utils qw(create_uuid threadid timestamp stringtobool trim); #check_ipnet
|
||||
use NGCP::BulkProcessor::DSSorter qw(sort_by_configs);
|
||||
use NGCP::BulkProcessor::Array qw(contains);
|
||||
use NGCP::BulkProcessor::Calendar qw(from_epoch datetime_to_string);
|
||||
#use NGCP::BulkProcessor::DSPath qw();
|
||||
|
||||
require Exporter;
|
||||
our @ISA = qw(Exporter);
|
||||
our @EXPORT_OK = qw(
|
||||
export_subscriber_profiles
|
||||
);
|
||||
|
||||
sub export_subscriber_profiles {

    # Export per-subscriber profile period events: (re)create the
    # period_events table, then process subscribers in (optionally
    # multithreaded) blocks, condensing each subscriber's profile events
    # into rows and inserting them either one-per-transaction or
    # block-per-transaction. Returns (result, warning_count).
    my $result = NGCP::BulkProcessor::Projects::ETL::EDR::Dao::PeriodEvents::create_table(1);

    my $static_context = {};

    destroy_all_dbs();
    my $warning_count :shared = 0;
    return ($result && NGCP::BulkProcessor::Dao::Trunk::accounting::events::process_subscribers(
        static_context => $static_context,
        process_code => sub {
            my ($context,$records,$row_offset) = @_;
            ping_all_dbs();
            my @period_event_rows = ();
            foreach my $subscriber_id (map { $_->[0]; } @$records) {
                # (removed leftover hard-coded debug block for
                # subscriber id 202 that printed to stdout)
                next unless _export_subscriber_profiles_init_context($context,$subscriber_id);
                push(@period_event_rows, _get_period_event_rows($context));

                # single-row txn mode: flush each row in its own txn
                if ($period_events_single_row_txn and (scalar @period_event_rows) > 0) {
                    while (defined (my $period_event_row = shift @period_event_rows)) {
                        if ($skip_errors) {
                            eval { _insert_period_events_rows($context,[$period_event_row]); };
                            _warn($context,$@) if $@;
                        } else {
                            _insert_period_events_rows($context,[$period_event_row]);
                        }
                    }
                }
            }

            # block txn mode: flush the whole block in one transaction
            # (fixed: called undefined insert_period_events_rows -
            # missing leading underscore - which crashed at runtime)
            if (not $period_events_single_row_txn and (scalar @period_event_rows) > 0) {
                if ($skip_errors) {
                    eval { _insert_period_events_rows($context,\@period_event_rows); };
                    _warn($context,$@) if $@;
                } else {
                    _insert_period_events_rows($context,\@period_event_rows);
                }
            }

            return 1;
        },
        init_process_context_code => sub {
            my ($context)= @_;
            $context->{db} = &get_sqlite_db();
            $context->{error_count} = 0;
            $context->{warning_count} = 0;
        },
        uninit_process_context_code => sub {
            my ($context)= @_;
            undef $context->{db};
            destroy_all_dbs();
            {
                lock $warning_count;
                $warning_count += $context->{warning_count};
            }
        },
        destroy_reader_dbs_code => \&destroy_all_dbs,
        blocksize => $export_subscriber_profiles_blocksize,
        multithreading => $export_subscriber_profiles_multithreading,
        numofthreads => $export_subscriber_profiles_numofthreads,
        joins => $export_subscriber_profiles_joins,
        conditions => $export_subscriber_profiles_conditions,
        #sort => [{ column => 'id', numeric => 1, dir => 1 }],
        #limit => $export_subscriber_profiles_limit,
    ),$warning_count,);

}
|
||||
|
||||
sub _export_subscriber_profiles_init_context {

    # Load one subscriber's accounting events into the per-block
    # context. Always returns 1; the caller skips the subscriber when
    # a false value is returned.
    my ($context, $subscriber_id) = @_;

    $context->{events} = NGCP::BulkProcessor::Dao::Trunk::accounting::events::findby_subscriberid(
        undef, $subscriber_id,
        $export_subscriber_profiles_joins,
        $export_subscriber_profiles_conditions);
    $context->{subscriber_id} = $subscriber_id;

    return 1;

}
|
||||
|
||||
sub _get_period_event_rows {

    # Condense the subscriber's profile events into one row per profile:
    # (subscriber_id, profile_id, start ts, comma-joined update ts list,
    # stop ts or undef). Events are filtered to the three profile types,
    # sorted ascending by id, and fed through a small state machine that
    # only accepts start -> update* -> end sequences; out-of-order
    # events are silently ignored.
    my ($context) = @_;

    # current open period being accumulated
    my $profile_events = {
        start => undef,
        update => [],
        stop => undef,
    };
    my $last_event;

    # keyed by profile id (the event's new_status); Tie::IxHash keeps
    # the profiles in first-seen order for the output rows
    my %subscriber_profiles = ();
    tie(%subscriber_profiles, 'Tie::IxHash');

    foreach my $event (@{sort_by_configs([ grep { contains($_->{type},[ qw(start_profile update_profile end_profile) ]); } @{$context->{events}} ],[
        { numeric => 1,
        dir => 1, #-1,
        memberchain => [ 'id' ],
        }
    ])}) {
        if ($event->{type} eq 'start_profile') {
            # a start is only valid at the beginning or after an end
            if (not defined $last_event or $last_event->{type} eq 'end_profile') {
                $profile_events->{start} = $event;
                $last_event = $event;
                $subscriber_profiles{$event->{new_status}} = $profile_events;
            } else {
                # out-of-order start: ignored
            }
        } elsif ($event->{type} eq 'update_profile') {
            # an update is only valid after a start or another update
            if (defined $last_event and contains($last_event->{type},[ qw(start_profile update_profile) ])) {
                push(@{$profile_events->{update}},$event);
                $last_event = $event;
            } else {
                # update without an open period: ignored
            }
        } elsif ($event->{type} eq 'end_profile') {
            # an end closes the open period and starts a fresh accumulator
            if (defined $last_event and contains($last_event->{type},[ qw(start_profile update_profile) ])) {
                $profile_events->{stop} = $event;
                $last_event = $event;
                $profile_events = {
                    start => undef,
                    update => [],
                    stop => undef,
                };
            } else {
                # end without an open period: ignored
            }
        }
    }

    # flatten the collected periods into insertable rows
    my @period_event_rows = ();
    foreach my $profile_id (keys %subscriber_profiles) {
        $profile_events = $subscriber_profiles{$profile_id};
        push(@period_event_rows,[
            $context->{subscriber_id},
            $profile_id,
            datetime_to_string(from_epoch($profile_events->{start}->{timestamp})),
            join(",",map { datetime_to_string(from_epoch($_->{timestamp})); } @{$profile_events->{update}}),
            (defined $profile_events->{stop} ? datetime_to_string(from_epoch($profile_events->{stop}->{timestamp})) : undef),
        ]);
    }

    return @period_event_rows;

}
|
||||
|
||||
sub _insert_period_events_rows {

    # Insert a batch of period event rows inside a single transaction;
    # on failure the transaction is rolled back (db_finish(1)) and the
    # original error is re-thrown.
    my ($context, $subscriber_rows) = @_;

    my $stmt = NGCP::BulkProcessor::Projects::ETL::EDR::Dao::PeriodEvents::getinsertstatement($ignore_period_events_unique);
    $context->{db}->db_do_begin($stmt);
    eval {
        $context->{db}->db_do_rowblock($subscriber_rows);
        $context->{db}->db_finish();
    };
    if (my $err = $@) {
        eval { $context->{db}->db_finish(1); };
        die($err);
    }

}
|
||||
|
||||
|
||||
sub _error {

    # Count and log a per-thread row processing error.
    my ($context, $message) = @_;

    $context->{error_count}++;
    rowprocessingerror($context->{tid} // threadid(), $message, getlogger(__PACKAGE__));

}
|
||||
|
||||
sub _warn {

    # Count and log a per-thread row processing warning.
    my ($context, $message) = @_;

    $context->{warning_count}++;
    rowprocessingwarn($context->{tid} // threadid(), $message, getlogger(__PACKAGE__));

}
|
||||
|
||||
sub _info {

    # Log a processing message, routed to the debug channel when the
    # optional $debug flag is set.
    my ($context, $message, $debug) = @_;

    my $tid = $context->{tid} // threadid();
    if ($debug) {
        processing_debug($tid, $message, getlogger(__PACKAGE__));
    } else {
        processing_info($tid, $message, getlogger(__PACKAGE__));
    }
}
|
||||
|
||||
sub _debug {

    # Log a debug-level processing message. The trailing $debug
    # parameter is accepted for signature parity with _info() but is
    # unused here.
    my ($context, $message, $debug) = @_;

    processing_debug($context->{tid} // threadid(), $message, getlogger(__PACKAGE__));

}
|
||||
|
||||
1;
|
@ -1,120 +0,0 @@
|
||||
package NGCP::BulkProcessor::Projects::ETL::EDR::ProjectConnectorPool;
|
||||
use strict;
|
||||
|
||||
## no critic
|
||||
|
||||
use File::Basename;
|
||||
use Cwd;
|
||||
use lib Cwd::abs_path(File::Basename::dirname(__FILE__) . '/../../../');
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::EDR::Settings qw(
|
||||
$csv_dir
|
||||
$sqlite_db_file
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::ConnectorPool qw(
|
||||
get_connectorinstancename
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::SqlConnectors::CSVDB qw();
|
||||
use NGCP::BulkProcessor::SqlConnectors::SQLiteDB qw($staticdbfilemode);
|
||||
|
||||
use NGCP::BulkProcessor::SqlProcessor qw(cleartableinfo);
|
||||
|
||||
require Exporter;
|
||||
our @ISA = qw(Exporter);
|
||||
our @EXPORT_OK = qw(
|
||||
|
||||
get_sqlite_db
|
||||
sqlite_db_tableidentifier
|
||||
|
||||
get_csv_db
|
||||
csv_db_tableidentifier
|
||||
|
||||
destroy_dbs
|
||||
destroy_all_dbs
|
||||
ping_all_dbs
|
||||
|
||||
);
|
||||
|
||||
my $sqlite_dbs = {};
|
||||
my $csv_dbs = {};
|
||||
|
||||
sub get_sqlite_db {

    # Return the pooled SQLite connector for the given instance name,
    # creating it on first use. A freshly created connector is always
    # connected unless the caller explicitly passed a false $reconnect.
    my ($instance_name, $reconnect) = @_;

    my $name = get_connectorinstancename($instance_name);
    unless (defined $sqlite_dbs->{$name}) {
        $sqlite_dbs->{$name} = NGCP::BulkProcessor::SqlConnectors::SQLiteDB->new($instance_name);
        $reconnect = 1 unless defined $reconnect;
    }
    $sqlite_dbs->{$name}->db_connect($staticdbfilemode, $sqlite_db_file) if $reconnect;

    return $sqlite_dbs->{$name};

}
|
||||
|
||||
sub sqlite_db_tableidentifier {

    # Resolve the safe table identifier for $tablename as seen from the
    # target connector; $get_target_db may be a coderef or a connector.
    my ($get_target_db, $tablename) = @_;

    my $target_db = (ref $get_target_db eq 'CODE') ? &$get_target_db() : $get_target_db;
    my $identifier = NGCP::BulkProcessor::SqlConnectors::SQLiteDB::get_tableidentifier(
        $tablename, $staticdbfilemode, $sqlite_db_file);

    return $target_db->getsafetablename($identifier);

}
|
||||
|
||||
sub get_csv_db {

    # Return the pooled CSV connector for the given instance name,
    # creating it on first use. A freshly created connector is always
    # connected unless the caller explicitly passed a false $reconnect.
    my ($instance_name, $reconnect) = @_;

    my $name = get_connectorinstancename($instance_name);
    unless (defined $csv_dbs->{$name}) {
        $csv_dbs->{$name} = NGCP::BulkProcessor::SqlConnectors::CSVDB->new($instance_name);
        $reconnect = 1 unless defined $reconnect;
    }
    $csv_dbs->{$name}->db_connect($csv_dir) if $reconnect;

    return $csv_dbs->{$name};

}
|
||||
|
||||
sub csv_db_tableidentifier {

    # Resolve the safe table identifier for $tablename as seen from the
    # target connector; $get_target_db may be a coderef or a connector.
    my ($get_target_db, $tablename) = @_;

    my $target_db = (ref $get_target_db eq 'CODE') ? &$get_target_db() : $get_target_db;
    my $identifier = NGCP::BulkProcessor::SqlConnectors::CSVDB::get_tableidentifier(
        $tablename, $csv_dir);

    return $target_db->getsafetablename($identifier);

}
|
||||
|
||||
sub destroy_dbs {

    # Clear cached table metadata and release every pooled connector
    # (both the sqlite and the csv pools).
    for my $pool ($sqlite_dbs, $csv_dbs) {
        for my $name (keys %$pool) {
            cleartableinfo($pool->{$name});
            undef $pool->{$name};
            delete $pool->{$name};
        }
    }

}
|
||||
|
||||
sub destroy_all_dbs() {
    # Tear down the project-local connectors first, then the globally
    # shared connector pool.
    destroy_dbs();
    NGCP::BulkProcessor::ConnectorPool::destroy_dbs();
}
|
||||
|
||||
sub ping_all_dbs() {
    # Keep the shared pool's connections alive (e.g. from worker threads).
    NGCP::BulkProcessor::ConnectorPool::ping_dbs();
}
|
||||
|
||||
1;
|
@ -1,235 +0,0 @@
|
||||
package NGCP::BulkProcessor::Projects::ETL::EDR::Settings;
|
||||
use strict;
|
||||
|
||||
## no critic
|
||||
|
||||
use File::Basename qw(fileparse);
|
||||
|
||||
use NGCP::BulkProcessor::Globals qw(
|
||||
$working_path
|
||||
$enablemultithreading
|
||||
$cpucount
|
||||
create_path
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::Logging qw(
|
||||
getlogger
|
||||
scriptinfo
|
||||
configurationinfo
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::LogError qw(
|
||||
fileerror
|
||||
filewarn
|
||||
configurationwarn
|
||||
configurationerror
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::LoadConfig qw(
|
||||
split_tuple
|
||||
parse_regexp
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::Utils qw(prompt timestampdigits threadid load_module);
|
||||
|
||||
use NGCP::BulkProcessor::Array qw(contains);
|
||||
|
||||
require Exporter;
|
||||
our @ISA = qw(Exporter);
|
||||
our @EXPORT_OK = qw(
|
||||
update_settings
|
||||
|
||||
get_export_filename
|
||||
|
||||
$ignore_period_events_unique
|
||||
$period_events_single_row_txn
|
||||
|
||||
$sqlite_db_file
|
||||
$csv_dir
|
||||
|
||||
check_dry
|
||||
|
||||
$output_path
|
||||
$input_path
|
||||
|
||||
$subscriber_profiles_export_filename_format
|
||||
|
||||
$defaultsettings
|
||||
$defaultconfig
|
||||
|
||||
$dry
|
||||
$skip_errors
|
||||
$force
|
||||
|
||||
$export_subscriber_profiles_multithreading
|
||||
$export_subscriber_profiles_numofthreads
|
||||
$export_subscriber_profiles_blocksize
|
||||
|
||||
$export_subscriber_profiles_joins
|
||||
$export_subscriber_profiles_conditions
|
||||
$export_subscriber_profiles_limit
|
||||
|
||||
);
|
||||
|
||||
our $defaultconfig = 'config.cfg';
|
||||
our $defaultsettings = 'settings.cfg';
|
||||
|
||||
our $ignore_period_events_unique = 0;
|
||||
our $period_events_single_row_txn = 1;
|
||||
|
||||
our $output_path = $working_path . 'output/';
|
||||
our $input_path = $working_path . 'input/';
|
||||
our $csv_dir = 'events';
|
||||
|
||||
our $subscriber_profiles_export_filename_format = undef;
|
||||
|
||||
our $force = 0;
|
||||
our $dry = 0;
|
||||
our $skip_errors = 0;
|
||||
|
||||
our $sqlite_db_file = 'sqlite';
|
||||
|
||||
our $export_subscriber_profiles_multithreading = $enablemultithreading;
|
||||
our $export_subscriber_profiles_numofthreads = $cpucount;
|
||||
our $export_subscriber_profiles_blocksize = 1000;
|
||||
|
||||
our $export_subscriber_profiles_joins = [];
|
||||
our $export_subscriber_profiles_conditions = [];
|
||||
our $export_subscriber_profiles_limit = undef;
|
||||
|
||||
sub update_settings {

    # Apply the key/value pairs of a parsed settings file ($data) to this
    # module's package globals. $configfile is only used for error reporting.
    # Returns 1 when all sub-steps succeeded, 0 when no data was given or a
    # step failed.
    my ($data,$configfile) = @_;

    if (defined $data) {

        my $result = 1;

        #&$configurationinfocode("testinfomessage",$configlogger);

        $result &= _prepare_working_paths(1);

        $subscriber_profiles_export_filename_format = $data->{subscriber_profiles_export_filename} if exists $data->{subscriber_profiles_export_filename};
        # validate the (possibly just updated) filename format early, so a
        # bad extension surfaces at settings load time instead of at export
        # time; previously the raw $data value was passed, bypassing the
        # module default when the key is absent:
        get_export_filename($subscriber_profiles_export_filename_format,$configfile);

        $sqlite_db_file = $data->{sqlite_db_file} if exists $data->{sqlite_db_file};
        $csv_dir = $data->{csv_dir} if exists $data->{csv_dir};

        $dry = $data->{dry} if exists $data->{dry};
        $skip_errors = $data->{skip_errors} if exists $data->{skip_errors};

        my $parse_result;
        ($parse_result,$export_subscriber_profiles_joins) = _parse_export_joins($data->{export_subscriber_profiles_joins},$configfile);
        $result &= $parse_result;
        ($parse_result,$export_subscriber_profiles_conditions) = _parse_export_conditions($data->{export_subscriber_profiles_conditions},$configfile);
        $result &= $parse_result;

        $export_subscriber_profiles_limit = $data->{export_subscriber_profiles_limit} if exists $data->{export_subscriber_profiles_limit};

        $export_subscriber_profiles_multithreading = $data->{export_subscriber_profiles_multithreading} if exists $data->{export_subscriber_profiles_multithreading};
        $export_subscriber_profiles_numofthreads = _get_numofthreads($cpucount,$data,'export_subscriber_profiles_numofthreads');
        $export_subscriber_profiles_blocksize = $data->{export_subscriber_profiles_blocksize} if exists $data->{export_subscriber_profiles_blocksize};

        $period_events_single_row_txn = $data->{period_events_single_row_txn} if exists $data->{period_events_single_row_txn};
        $ignore_period_events_unique = $data->{ignore_period_events_unique} if exists $data->{ignore_period_events_unique};

        return $result;
    }
    return 0;

}
|
||||
|
||||
sub _prepare_working_paths {

    # Ensure the input/ and output/ folders below $working_path exist
    # (creating them when $create is true) and record their final locations
    # in the corresponding package globals. Returns a truthy flag that is 0
    # as soon as any path could not be prepared.
    my ($create) = @_;

    my $ok = 1;
    my $path_ok;

    ($path_ok,$input_path) = create_path($working_path . 'input',$input_path,$create,\&fileerror,getlogger(__PACKAGE__));
    $ok &= $path_ok;

    ($path_ok,$output_path) = create_path($working_path . 'output',$output_path,$create,\&fileerror,getlogger(__PACKAGE__));
    $ok &= $path_ok;

    return $ok;

}
|
||||
|
||||
sub _get_numofthreads {

    # Resolve the worker thread count for a given settings key: take the
    # configured value when present, otherwise the supplied default, and
    # never exceed the number of available CPUs.
    my ($default_value,$data,$key) = @_;
    my $threads = exists $data->{$key} ? $data->{$key} : $default_value;
    $threads = $cpucount if $threads > $cpucount;
    return $threads;

}
|
||||
|
||||
sub get_export_filename {

    # Build the export target path from the configured sprintf format
    # (placeholders: timestamp digits, thread id), remove a stale file of
    # the same name, and derive the export format from the file extension.
    # Returns ($export_filename,$export_format):
    #   - both undef when no format is configured,
    #   - filename undef when a stale file could not be removed.
    my ($filename_format,$configfile) = @_;
    my $export_filename;
    my $export_format;
    if ($filename_format) {
        $export_filename = sprintf($filename_format,timestampdigits(),threadid());
        unless ($export_filename =~ /^\//) {
            # relative names are placed below the configured output folder
            $export_filename = $output_path . $export_filename;
        }
        if (-e $export_filename and (unlink $export_filename) == 0) {
            filewarn('cannot remove ' . $export_filename . ': ' . $!,getlogger(__PACKAGE__));
            $export_filename = undef;
        }
        # guard added: fileparse() used to be invoked unconditionally and
        # received undef when the stale file could not be removed above,
        # producing uninitialized warnings and a misleading config error.
        if (defined $export_filename) {
            my ($name,$path,$suffix) = fileparse($export_filename,".csv");
            if ($suffix eq '.csv') {
                $export_format = 'csv';
            } else {
                configurationerror($configfile,"$filename_format: .csv export file format required");
            }
        }
    }
    return ($export_filename,$export_format);
}
|
||||
|
||||
sub _parse_export_joins {

    # Parse a settings token of the form
    #   { 'table alias' => { 'col_a' => 'col_b' } }, { ... }
    # into a list of join hash structures as consumed by the DAO layer.
    # Returns (1,\@joins); $file is accepted for signature uniformity with
    # the other config parsers but is not used here.
    my ($token,$file) = @_;
    my @joins = ();
    if (defined $token and length($token) > 0) {
        foreach my $fragment (_split(\$token)) {
            next unless($fragment);
            $fragment =~ s/^\s*\{?\s*//;
            $fragment =~ s/\}\s*\}\s*$/}/;
            # NOTE: renamed away from $a/$b - those are the global sort()
            # comparison variables and must not be shadowed by lexicals.
            my ($table, $colspec) = split(/\s*=>\s*{\s*/, $fragment);
            $table =~ s/^\s*\'//;
            $table =~ s/\'$//g;
            $colspec =~ s/\s*\}\s*$//;
            my ($left_col, $right_col) = split(/\s*=>\s*/, $colspec);
            $left_col =~ s/^\s*\'//g;
            $left_col =~ s/\'\s*//;
            $right_col =~ s/^\s*\'//g;
            $right_col =~ s/\'\s*//;
            push @joins, { $table => { $left_col => $right_col } };
        }
    }
    return (1,\@joins);
}
|
||||
|
||||
sub _parse_export_conditions {

    # Parse a settings token of the form
    #   { 'col' => { 'OP' => 'value' } }, { ... }
    # into a list of condition hash structures as consumed by the DAO layer.
    # Returns (1,\@conditions); $file is accepted for signature uniformity
    # with the other config parsers but is not used here.
    my ($token,$file) = @_;
    my @conditions = ();
    if (defined $token and length($token) > 0) {
        foreach my $fragment (_split(\$token)) {
            next unless($fragment);
            $fragment =~ s/^\s*\{?\s*//;
            $fragment =~ s/\}\s*\}\s*$/}/;
            # NOTE: renamed away from $a/$b - those are the global sort()
            # comparison variables and must not be shadowed by lexicals.
            my ($column, $opspec) = split(/\s*=>\s*{\s*/, $fragment);
            $column =~ s/^\s*\'//;
            $column =~ s/\'$//g;
            $opspec =~ s/\s*\}\s*$//;
            my ($op, $value) = split(/\s*=>\s*/, $opspec);
            $op =~ s/^\s*\'//g;
            $op =~ s/\'\s*//;
            $value =~ s/^\s*\'//g;
            $value =~ s/\'\s*//;
            push @conditions, { $column => { $op => $value } };
        }
    }
    return (1,\@conditions);
}
|
||||
|
||||
1;
|
@ -1,61 +0,0 @@
|
||||
##general settings:
|
||||
working_path = /var/sipwise
|
||||
cpucount = 4
|
||||
enablemultithreading = 1
|
||||
|
||||
##gearman/service listener config:
|
||||
jobservers = 127.0.0.1:4730
|
||||
|
||||
##NGCP MySQL connectivity - "accounting" db:
|
||||
accounting_host = db01
|
||||
accounting_port = 3306
|
||||
accounting_databasename = accounting
|
||||
accounting_username = root
|
||||
accounting_password =
|
||||
|
||||
##NGCP MySQL connectivity - "billing" db:
|
||||
billing_host = db01
|
||||
billing_port = 3306
|
||||
billing_databasename = billing
|
||||
billing_username = root
|
||||
billing_password =
|
||||
|
||||
##NGCP MySQL connectivity - "provisioning" db:
|
||||
provisioning_host = db01
|
||||
provisioning_port = 3306
|
||||
provisioning_databasename = provisioning
|
||||
provisioning_username = root
|
||||
provisioning_password =
|
||||
|
||||
##NGCP MySQL connectivity - "kamailio" db:
|
||||
kamailio_host = db01
|
||||
kamailio_port = 3306
|
||||
kamailio_databasename = kamailio
|
||||
kamailio_username = root
|
||||
kamailio_password =
|
||||
|
||||
##NGCP MySQL connectivity - default db for distributed transactions (XA) to connect to:
|
||||
xa_host = db01
|
||||
xa_port = 3306
|
||||
xa_databasename = ngcp
|
||||
xa_username = root
|
||||
xa_password =
|
||||
|
||||
##NGCP REST-API connectivity:
|
||||
ngcprestapi_uri = https://127.0.0.1:1443
|
||||
ngcprestapi_username = administrator
|
||||
ngcprestapi_password = administrator
|
||||
ngcprestapi_realm = api_admin_http
|
||||
|
||||
##sending email:
|
||||
emailenable = 0
|
||||
erroremailrecipient =
|
||||
warnemailrecipient =
|
||||
completionemailrecipient = rkrenn@sipwise.com
|
||||
doneemailrecipient =
|
||||
|
||||
##logging:
|
||||
fileloglevel = INFO
|
||||
#DEBUG
|
||||
screenloglevel = INFO
|
||||
emailloglevel = OFF
|
@ -1,61 +0,0 @@
|
||||
##general settings:
|
||||
working_path = /home/rkrenn/temp/customer_exporter
|
||||
cpucount = 4
|
||||
enablemultithreading = 1
|
||||
|
||||
##gearman/service listener config:
|
||||
jobservers = 127.0.0.1:4730
|
||||
|
||||
##NGCP MySQL connectivity - "accounting" db:
|
||||
accounting_host = 192.168.0.96
|
||||
accounting_port = 3306
|
||||
accounting_databasename = accounting
|
||||
accounting_username = root
|
||||
accounting_password =
|
||||
|
||||
##NGCP MySQL connectivity - "billing" db:
|
||||
billing_host = 192.168.0.96
|
||||
billing_port = 3306
|
||||
billing_databasename = billing
|
||||
billing_username = root
|
||||
billing_password =
|
||||
|
||||
##NGCP MySQL connectivity - "provisioning" db:
|
||||
provisioning_host = 192.168.0.96
|
||||
provisioning_port = 3306
|
||||
provisioning_databasename = provisioning
|
||||
provisioning_username = root
|
||||
provisioning_password =
|
||||
|
||||
##NGCP MySQL connectivity - "kamailio" db:
|
||||
kamailio_host = 192.168.0.96
|
||||
kamailio_port = 3306
|
||||
kamailio_databasename = kamailio
|
||||
kamailio_username = root
|
||||
kamailio_password =
|
||||
|
||||
##NGCP MySQL connectivity - default db for distributed transactions (XA) to connect to:
|
||||
xa_host = 192.168.0.96
|
||||
xa_port = 3306
|
||||
xa_databasename = ngcp
|
||||
xa_username = root
|
||||
xa_password =
|
||||
|
||||
##NGCP REST-API connectivity:
|
||||
ngcprestapi_uri = https://127.0.0.1:1443
|
||||
ngcprestapi_username = administrator
|
||||
ngcprestapi_password = administrator
|
||||
ngcprestapi_realm = api_admin_http
|
||||
|
||||
##sending email:
|
||||
emailenable = 0
|
||||
erroremailrecipient =
|
||||
warnemailrecipient =
|
||||
completionemailrecipient = rkrenn@sipwise.com
|
||||
doneemailrecipient =
|
||||
|
||||
##logging:
|
||||
fileloglevel = INFO
|
||||
#DEBUG
|
||||
screenloglevel = INFO
|
||||
emailloglevel = OFF
|
@ -1,214 +0,0 @@
|
||||
use strict;
|
||||
|
||||
## no critic
|
||||
|
||||
our $VERSION = "0.0";
|
||||
|
||||
use File::Basename;
|
||||
use Cwd;
|
||||
use lib Cwd::abs_path(File::Basename::dirname(__FILE__) . '/../../../../../');
|
||||
|
||||
use Getopt::Long qw(GetOptions);
|
||||
use Fcntl qw(LOCK_EX LOCK_NB);
|
||||
|
||||
use NGCP::BulkProcessor::Globals qw();
|
||||
use NGCP::BulkProcessor::Projects::ETL::EDR::Settings qw(
|
||||
update_settings
|
||||
|
||||
get_export_filename
|
||||
$subscriber_profiles_export_filename_format
|
||||
|
||||
check_dry
|
||||
$output_path
|
||||
$defaultsettings
|
||||
$defaultconfig
|
||||
$dry
|
||||
$skip_errors
|
||||
$force
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::Logging qw(
|
||||
init_log
|
||||
getlogger
|
||||
$attachmentlogfile
|
||||
scriptinfo
|
||||
cleanuplogfiles
|
||||
$currentlogfile
|
||||
);
|
||||
use NGCP::BulkProcessor::LogError qw (
|
||||
completion
|
||||
done
|
||||
scriptwarn
|
||||
scripterror
|
||||
filewarn
|
||||
fileerror
|
||||
);
|
||||
use NGCP::BulkProcessor::LoadConfig qw(
|
||||
load_config
|
||||
$SIMPLE_CONFIG_TYPE
|
||||
$YAML_CONFIG_TYPE
|
||||
$ANY_CONFIG_TYPE
|
||||
);
|
||||
use NGCP::BulkProcessor::Array qw(removeduplicates);
|
||||
use NGCP::BulkProcessor::Utils qw(getscriptpath prompt cleanupdir);
|
||||
use NGCP::BulkProcessor::Mail qw(
|
||||
cleanupmsgfiles
|
||||
);
|
||||
|
||||
use NGCP::BulkProcessor::SqlConnectors::CSVDB qw(cleanupcvsdirs);
|
||||
use NGCP::BulkProcessor::SqlConnectors::SQLiteDB qw(cleanupdbfiles);
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::EDR::ProjectConnectorPool qw(destroy_all_dbs get_csv_db get_sqlite_db);
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::EDR::Dao::PeriodEvents qw();
|
||||
|
||||
use NGCP::BulkProcessor::Projects::ETL::EDR::ExportEvents qw(
|
||||
export_subscriber_profiles
|
||||
);
|
||||
|
||||
# Guard against concurrent runs: take an exclusive, non-blocking lock on the
# script's own DATA handle (see the __DATA__ section at the bottom of this
# file - it exists solely so this lock has something to attach to).
scripterror(getscriptpath() . ' already running',getlogger(getscriptpath())) unless flock DATA, LOCK_EX | LOCK_NB;

# Registry of all supported --task option values (used for error messages):
my @TASK_OPTS = ();

# Tasks requested on the command line; filled by GetOptions in init().
my $tasks = [];

my $cleanup_task_opt = 'cleanup';
push(@TASK_OPTS,$cleanup_task_opt);

my $cleanup_all_task_opt = 'cleanup_all';
push(@TASK_OPTS,$cleanup_all_task_opt);

my $export_subscriber_profiles_task_opt = 'export_subscriber_profiles';
push(@TASK_OPTS,$export_subscriber_profiles_task_opt);
|
||||
|
||||
# Entry point: configuration/option parsing first, then task dispatch;
# the process exit code reflects whether initialization succeeded.
exit(1) unless init();
main();
exit(0);
|
||||
|
||||
sub init {

    # Parse command-line options, load the config and settings files and
    # initialize logging. Returns a true value when everything succeeded.
    my $configfile = $defaultconfig;
    my $settingsfile = $defaultsettings;

    GetOptions(
        "config=s" => \$configfile,
        "settings=s" => \$settingsfile,
        "task=s" => $tasks,
        "skip-errors" => \$skip_errors,
        "force" => \$force,
    ) or return 0;

    # the same task may be given multiple times - run it only once:
    $tasks = removeduplicates($tasks,1);

    my $ok = load_config($configfile);
    init_log();
    $ok &= load_config($settingsfile,\&update_settings,$SIMPLE_CONFIG_TYPE);

    return $ok;

}
|
||||
|
||||
sub main() {

    # Run each requested task in command-line order, accumulating result
    # messages and log attachments for the final notification mail.
    my @messages = ();
    my @attachmentfiles = ();
    my $result = 1;        # sticks at 0 once any task fails
    my $completion = 0;    # set when at least one "real" export task ran

    if (defined $tasks and 'ARRAY' eq ref $tasks and (scalar @$tasks) > 0) {
        scriptinfo('skip-errors: processing won\'t stop upon errors',getlogger(__PACKAGE__)) if $skip_errors;
        foreach my $task (@$tasks) {

            # taskinfo() logs start-or-skip and returns $result, so a failed
            # earlier task causes later tasks to be skipped (not aborted):
            if (lc($cleanup_task_opt) eq lc($task)) {
                $result &= cleanup_task(\@messages,0) if taskinfo($cleanup_task_opt,$result);
            } elsif (lc($cleanup_all_task_opt) eq lc($task)) {
                $result &= cleanup_task(\@messages,1) if taskinfo($cleanup_all_task_opt,$result);

            } elsif (lc($export_subscriber_profiles_task_opt) eq lc($task)) {
                $result &= export_subscriber_profiles_task(\@messages) if taskinfo($export_subscriber_profiles_task_opt,$result);
                $completion |= 1;

            } else {
                # unknown task name aborts the whole run immediately
                $result = 0;
                scripterror("unknown task option '" . $task . "', must be one of " . join(', ',@TASK_OPTS),getlogger(getscriptpath()));
                last;
            }
        }
    } else {
        $result = 0;
        scripterror('at least one task option is required. supported tasks: ' . join(', ',@TASK_OPTS),getlogger(getscriptpath()));
    }

    push(@attachmentfiles,$attachmentlogfile);
    # "completion" mail when an export task ran, plain "done" mail for
    # cleanup-only invocations:
    if ($completion) {
        completion(join("\n\n",@messages),\@attachmentfiles,getlogger(getscriptpath()));
    } else {
        done(join("\n\n",@messages),\@attachmentfiles,getlogger(getscriptpath()));
    }

    return $result;
}
|
||||
|
||||
sub taskinfo {

    # Log whether a task is about to start or is being skipped because an
    # earlier task failed; passes the $result flag through to the caller so
    # it can gate the task invocation.
    my ($task,$result) = @_;
    my $msg;
    if ($result) {
        $msg = "starting task: '$task'";
    } else {
        $msg = "skipping task '$task' due to previous problems";
    }
    scriptinfo($msg,getlogger(getscriptpath()));
    return $result;
}
|
||||
|
||||
sub cleanup_task {

    # Remove generated artifacts: CSV dirs, sqlite db files, old log and
    # mail files, and (for cleanup_all, i.e. $clean_generated) the output
    # folder. Interactive confirmation is required for cleanup_all unless
    # --force was given. Appends an outcome message and returns 1/0.
    my ($messages,$clean_generated) = @_;
    my $result = 0;
    my $err;
    if (!$clean_generated or $force or 'yes' eq lc(prompt("Type 'yes' to proceed: "))) {
        eval {
            cleanupcvsdirs();
            cleanupdbfiles();
            cleanuplogfiles(\&fileerror,\&filewarn,($currentlogfile,$attachmentlogfile));
            cleanupmsgfiles(\&fileerror,\&filewarn);
            cleanupdir($output_path,1,\&filewarn,getlogger(getscriptpath())) if $clean_generated;
            $result = 1;
        };
        # capture $@ immediately - it is a global; checking it later (as the
        # code previously did, outside this branch) can pick up stale
        # content from an unrelated earlier eval when the prompt is declined
        $err = $@;
    }
    if ($err or !$result) {
        push(@$messages,'working directory cleanup INCOMPLETE');
        return 0;
    } else {
        push(@$messages,'working directory folders cleaned up');
        return 1;
    }
}
|
||||
|
||||
sub export_subscriber_profiles_task {

    # Run the subscriber profile export, then copy the result into the
    # configured export file (sqlite db file or csv table dump). Appends an
    # outcome message (with warning count) and returns the export result.
    my ($messages) = @_;
    my ($result,$warning_count) = (0,0);
    eval {
        ($result,$warning_count) = export_subscriber_profiles();
    };
    my $err = $@;
    my $stats = ": $warning_count warnings";
    eval {
        my ($export_filename,$export_format) = get_export_filename($subscriber_profiles_export_filename_format);
        if ('sqlite' eq $export_format) {
            get_sqlite_db()->copydbfile($export_filename);
        } elsif ('csv' eq $export_format) {
            NGCP::BulkProcessor::Projects::ETL::EDR::Dao::PeriodEvents::copy_table(\&get_csv_db);
            get_csv_db()->copytablefile(NGCP::BulkProcessor::Projects::ETL::EDR::Dao::PeriodEvents::gettablename(),$export_filename);
        } else {
            # was a single-quoted literal before, so the filename never
            # actually appeared in the message:
            push(@$messages,"invalid extension for output filename $export_filename");
        }
    };
    if ($err or !$result) {
        push(@$messages,"exporting subscriber profiles INCOMPLETE$stats");
    } else {
        push(@$messages,"exporting subscriber profiles completed$stats");
    }
    destroy_all_dbs();
    return $result;

}
|
||||
|
||||
__DATA__
|
||||
This exists to allow the locking code at the beginning of the file to work.
|
||||
DO NOT REMOVE THESE LINES!
|
@ -1,21 +0,0 @@
|
||||
#dry=0
|
||||
#skip_errors=0
|
||||
|
||||
export_subscriber_profiles_multithreading = 1
|
||||
export_subscriber_profiles_numofthreads = 2
|
||||
export_subscriber_profiles_blocksize = 1000
|
||||
export_subscriber_profiles_limit = 10000
|
||||
|
||||
#export_cdr_conditions = { 'accounting.cdr.destination_domain' => { 'IN' => '("80.110.2.164","ccs.upc.at")' } }
|
||||
#export_cdr_conditions = { 'accounting.cdr.destination_domain' => { '=' => '"ccs.upc.at"' } }
|
||||
#, { 'accounting.cdr.rating_status' => { '=' => '"ok"' } }
|
||||
#{ 'accounting.cdr.call_status' => { '=' => '"ok"' } }
|
||||
#export_cdr_joins = { 'accounting.cdr_export_status_data esd' => { 'esd.cdr_id' => 'accounting.cdr.id' } }, { 'accounting.cdr_export_status es' => { 'es.id' => 'esd.status_id' } }
|
||||
export_subscriber_profiles_conditions = { 'accounting.cdr.id' => { 'IN' => '(51,53, 87,89, 55, 79, 65,67,69, 81,83,85, 111, 113)' } }
|
||||
|
||||
subscriber_profiles_export_filename=subscriber_profiles_%s.csv
|
||||
|
||||
sqlite_db_file = sqlite
|
||||
csv_dir = events
|
||||
period_events_single_row_txn = 1
|
||||
ignore_period_events_unique = 0
|
Loading…
Reference in new issue