Added script to migrate

edwardro22
2017-09-11 12:17:14 +00:00
parent 5a37c9fa94
commit df31455316
11 changed files with 677 additions and 75 deletions

View File

@@ -1,68 +1,101 @@
 #!/usr/bin/python
 # depends:
-#   ^ redis # install via easy_install redis
-# asserts:
-#   ^ destination redis is not password protected when connected from source redis server
-#     (https://github.com/antirez/redis/pull/2507)
+#   ^ pymongo # install via: easy_install pymongo
 # behaviour:
-#   ^ the script will not overwrite keys on the destination server/database
+#   ^ the script will "move" the collections if source and target server are the same
+#     but will "copy" (dump/restore) if source and target servers are different
-from_host = '127.0.0.1'
-from_port = 6379
-from_db = 11
-from_pass = ''
+from_host = '127.0.0.1'
+from_port = '27017'
+from_db = '11'
+from_auth_db = 'cgrates' # Auth db on source server
+from_user = 'cgrates'
+from_pass = ''
-to_host = '127.0.0.1'
-to_port = 6379
-to_db = 10
-to_pass = '' # Not used
+to_host = '127.0.0.1'
+to_port = '27017'
+to_db = '10'
+to_auth_db = "cgrates" # Auth db on target server
+to_user = 'cgrates'
+to_pass = ''
-keymask = '*'
-timeout = 2000
+ignore_empty_cols = True
+# Do not migrate collections with 0 document count.
+# Works only if from/to is on same host.
-import time
-import redis
+# Overwrite target collections flag.
+# Works only if from/to is on same host.
+# If from/to hosts are different we use mongorestore which overwrites by default.
+drop_target = False
-from_redis = redis.Redis(host = from_host, port = from_port, password=from_pass, db = from_db)
-to_redis = redis.Redis(host = to_host, port = to_port, db = to_db)
+dump_folder = 'dump'
-to_keys = to_redis.keys(keymask)
-from_keys = from_redis.keys(keymask)
-print('Found %d keys on source.' % len(from_keys))
-print('Found %d keys on destination.' % len(to_keys))
+import sys
+from pymongo import MongoClient
+from urllib import quote_plus
+from collections import OrderedDict
-# keys found
-if len(from_keys) > 0:
-    # same server
-    if from_host == to_host and from_port == to_port:
+# same server
+if from_host == to_host and from_port == to_port:
-        print('Migrating on same server...')
-        i = 0
-        for key in from_keys:
-            i += 1
-            print('Moving key %s (%d of %d)...' % (key, i, len(from_keys)))
-            from_redis.execute_command('MOVE', key, to_db)
+    print('Migrating on same server...')
+    mongo_from_url = 'mongodb://' + from_user + ':' + quote_plus(from_pass) + '@'+ from_host + ':' + from_port + '/' + from_auth_db
+    if from_pass == '': # disabled auth
+        mongo_from_url = 'mongodb://' + from_host + ':' + from_port + '/' + from_db
+    client = MongoClient(mongo_from_url)
-    # different servers
-    else:
-        print('Migrating between different servers...')
-        i = 0
-        for key in from_keys:
-            i += 1
-            print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))),
-            try:
-                from_redis.execute_command('MIGRATE', to_host, to_port, key, to_db, timeout)
-            except redis.exceptions.ResponseError, e:
-                if not 'ERR Target key name is busy' in str(e):
-                    raise e
-        print('Done.')
-    # done
-    from_keys_after = from_redis.keys(keymask)
-    to_keys_after = to_redis.keys(keymask)
-    print('There are now %d keys on source.' % len(from_keys_after))
-    print('There are now %d keys on destination.' % len(to_keys_after))
-    print('%d keys were moved' % (len(to_keys_after) - len(to_keys)))
-    print('Migration complete.')
-# no keys found
+    db = client[from_db]
+    cols = db.collection_names()
+    # collections found
+    if len(cols) > 0:
+        print('Found %d collections on source. Moving...' % len(cols))
+        i = 0
+        for col in cols:
+            i += 1
+            if not ignore_empty_cols or (ignore_empty_cols and db[col].count() > 0):
+                print('Moving collection %s (%d of %d)...' % (col, i, len(cols)))
+                try:
+                    client.admin.command(OrderedDict([('renameCollection', from_db + '.' + col), ('to', to_db + '.' + col), ('dropTarget', drop_target)]))
+                except:
+                    e = sys.exc_info()[0]
+                    print(e)
+            else:
+                print('Skipping empty collection %s (%d of %d)...' % (col, i, len(cols)))
+    # no collections found
+    else:
+        print('No collections in source database.')
+# different servers
 else:
-    print('No keys with keymask %s found in source database' % keymask)
+    import subprocess
+    import os
+    import shutil
+    print('Migrating between different servers...')
+    print('Dumping...')
+    out = subprocess.check_output([
+        'mongodump',
+        '--host', '%s' % from_host,
+        '-u', '%s' % from_user,
+        '-p', '%s' % from_pass,
+        '--authenticationDatabase', '%s' % from_auth_db,
+        '--db', '%s' % from_db,
+        '--port', '%s' % from_port,
+        '-o', '%s' % dump_folder,
+    ], stderr= subprocess.STDOUT)
+    print('Dump complete.')
+    print('Restoring...')
+    out = subprocess.check_output([
+        'mongorestore',
+        '--host', '%s' % to_host,
+        '-u', '%s' % to_user,
+        '-p', '%s' % to_pass,
+        '--authenticationDatabase', '%s' % to_auth_db,
+        '--db', '%s' % to_db,
+        '--port', '%s' % to_port,
+        '--drop', '%s/%s' % (dump_folder, from_db),
+    ], stderr= subprocess.STDOUT)
+    print('Restore complete.')
+print('Migration complete.')

View File

@@ -8,28 +8,24 @@
 # behaviour:
 #   ^ the script will not overwrite keys on the destination server/database
-from_host = '127.0.0.1'
-from_port = 6379
 from_db = 11
-from_pass = ''
 to_host = '127.0.0.1'
 to_port = 6379
 to_db = 10
 to_pass = '' # Not used
 keymask = '*'
 timeout = 2000
 import time
 import redis
 import argparse
 parser = argparse.ArgumentParser()
 parser.add_argument("-host", "--host",default="127.0.0.1", help='default: "127.0.0.1"')
 parser.add_argument("-port", "--port", type=int ,default=6379, help='default: 6379')
 parser.add_argument("-pass", "--password", default="", help='default: ""')
 args = parser.parse_args()
 from_host = args.host
 from_port = args.port
 from_pass = args.password
 from_redis = redis.Redis(host = from_host, port = from_port, password=from_pass, db = from_db)
-to_redis = redis.Redis(host = from_host, port = from_port, db = to_db)
+to_redis = redis.Redis(host = to_host, port = to_port, db = to_db)
 to_keys = to_redis.keys(keymask)
 from_keys = from_redis.keys(keymask)
@@ -38,14 +34,28 @@ print('Found %d keys on destination.' % len(to_keys))
 # keys found
 if len(from_keys) > 0:
-    print('Migrating on same server...')
-    i = 0
-    for key in from_keys:
-        i += 1
-        print('Moving key %s (%d of %d)...' % (key, i, len(from_keys)))
-        from_redis.execute_command('MOVE', key, to_db)
+    # same server
+    if from_host == to_host and from_port == to_port:
+        print('Migrating on same server...')
+        i = 0
+        for key in from_keys:
+            i += 1
+            print('Moving key %s (%d of %d)...' % (key, i, len(from_keys)))
+            from_redis.execute_command('MOVE', key, to_db)
+        print('Done.')
+    # different servers
+    else:
+        print('Migrating between different servers...')
+        i = 0
+        for key in from_keys:
+            i += 1
+            print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))),
+            try:
+                from_redis.execute_command('MIGRATE', to_host, to_port, key, to_db, timeout)
+            except redis.exceptions.ResponseError, e:
+                if not 'ERR Target key name is busy' in str(e):
+                    raise e
+        print('Done.')
     # done
     from_keys_after = from_redis.keys(keymask)
     to_keys_after = to_redis.keys(keymask)
@@ -55,4 +65,4 @@ if len(from_keys) > 0:
     print('Migration complete.')
 # no keys found
 else:
-    print('No keys with keymask %s found in source database' % keymask)
\ No newline at end of file
+    print('No keys with keymask %s found in source database' % keymask)

View File

@@ -0,0 +1,101 @@
#!/usr/bin/python
# depends:
# ^ pymongo # install via: easy_install pymongo
# behaviour:
# ^ the script will "move" the collections if source and target server are the same
# but will "copy" (dump/restore) if source and target servers are different
from_host = '127.0.0.1'
from_port = '27017'
from_db = '11'
from_auth_db = 'cgrates' # Auth db on source server
from_user = 'cgrates'
from_pass = ''
to_host = '127.0.0.1'
to_port = '27017'
to_db = '10'
to_auth_db = "cgrates" # Auth db on target server
to_user = 'cgrates'
to_pass = ''
ignore_empty_cols = True
# Do not migrate collections with 0 document count.
# Works only if from/to is on same host.
# Overwrite target collections flag.
# Works only if from/to is on same host.
# If from/to hosts are different we use mongorestore which overwrites by default.
drop_target = False
dump_folder = 'dump'
import sys
from pymongo import MongoClient
from urllib import quote_plus
from collections import OrderedDict
# same server
if from_host == to_host and from_port == to_port:
print('Migrating on same server...')
mongo_from_url = 'mongodb://' + from_user + ':' + quote_plus(from_pass) + '@'+ from_host + ':' + from_port + '/' + from_auth_db
if from_pass == '': # disabled auth
mongo_from_url = 'mongodb://' + from_host + ':' + from_port + '/' + from_db
client = MongoClient(mongo_from_url)
db = client[from_db]
cols = db.collection_names()
# collections found
if len(cols) > 0:
print('Found %d collections on source. Moving...' % len(cols))
i = 0
for col in cols:
i += 1
if not ignore_empty_cols or (ignore_empty_cols and db[col].count() > 0):
print('Moving collection %s (%d of %d)...' % (col, i, len(cols)))
try:
client.admin.command(OrderedDict([('renameCollection', from_db + '.' + col), ('to', to_db + '.' + col), ('dropTarget', drop_target)]))
except:
e = sys.exc_info()[0]
print(e)
else:
print('Skipping empty collection %s (%d of %d)...' % (col, i, len(cols)))
# no collections found
else:
print('No collections in source database.')
# different servers
else:
import subprocess
import os
import shutil
print('Migrating between different servers...')
print('Dumping...')
out = subprocess.check_output([
'mongodump',
'--host', '%s' % from_host,
'-u', '%s' % from_user,
'-p', '%s' % from_pass,
'--authenticationDatabase', '%s' % from_auth_db,
'--db', '%s' % from_db,
'--port', '%s' % from_port,
'-o', '%s' % dump_folder,
], stderr= subprocess.STDOUT)
print('Dump complete.')
print('Restoring...')
out = subprocess.check_output([
'mongorestore',
'--host', '%s' % to_host,
'-u', '%s' % to_user,
'-p', '%s' % to_pass,
'--authenticationDatabase', '%s' % to_auth_db,
'--db', '%s' % to_db,
'--port', '%s' % to_port,
'--drop', '%s/%s' % (dump_folder, from_db),
], stderr= subprocess.STDOUT)
print('Restore complete.')
print('Migration complete.')
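Not part of the commit, but a quick way to sanity-check the script above: since renameCollection moves collections out of the source database, running a per-collection document count before and after the migration should show the counts reappearing unchanged under the target database. A minimal sketch, assuming the auth-less default settings the script uses and the same pymongo calls it already relies on:

#!/usr/bin/python
# Hypothetical helper (not in this commit): run before and after the
# migration and compare the per-collection document counts by hand.
from pymongo import MongoClient

client = MongoClient('mongodb://127.0.0.1:27017')  # auth disabled, as in the script
for name in ('11', '10'):  # source and target db names from the config above
    db = client[name]
    for col in db.collection_names():  # same pymongo call the script uses
        print('%s.%s: %d documents' % (name, col, db[col].count()))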

View File

@@ -0,0 +1,68 @@
#!/usr/bin/python
# depends:
# ^ redis # install via easy_install redis
# asserts:
# ^ destination redis is not password protected when connected from source redis server
# (https://github.com/antirez/redis/pull/2507)
# behaviour:
# ^ the script will not overwrite keys on the destination server/database
from_host = '192.168.100.40'
from_port = 6379
from_db = 11
from_pass = ''
to_host = '192.168.100.40'
to_port = 6379
to_db = 10
to_pass = '' # Not used
keymask = '*'
timeout = 2000
import time
import redis
from_redis = redis.Redis(host = from_host, port = from_port, password=from_pass, db = from_db)
to_redis = redis.Redis(host = to_host, port = to_port, db = to_db)
to_keys = to_redis.keys(keymask)
from_keys = from_redis.keys(keymask)
print('Found %d keys on source.' % len(from_keys))
print('Found %d keys on destination.' % len(to_keys))
# keys found
if len(from_keys) > 0:
# same server
if from_host == to_host and from_port == to_port:
print('Migrating on same server...')
i = 0
for key in from_keys:
i += 1
print('Moving key %s (%d of %d)...' % (key, i, len(from_keys)))
from_redis.execute_command('MOVE', key, to_db)
# different servers
else:
print('Migrating between different servers...')
i = 0
for key in from_keys:
i += 1
print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))),
try:
from_redis.execute_command('MIGRATE', to_host, to_port, key, to_db, timeout)
except redis.exceptions.ResponseError, e:
if not 'ERR Target key name is busy' in str(e):
raise e
print('Done.')
# done
from_keys_after = from_redis.keys(keymask)
to_keys_after = to_redis.keys(keymask)
print('There are now %d keys on source.' % len(from_keys_after))
print('There are now %d keys on destination.' % len(to_keys_after))
print('%d keys were moved' % (len(to_keys_after) - len(to_keys)))
print('Migration complete.')
# no keys found
else:
print('No keys with keymask %s found in source database' % keymask)
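The MIGRATE loop above deliberately swallows 'Target key name is busy' errors; that is how the "will not overwrite keys" guarantee in the header comment is enforced. If overwriting destination keys is acceptable, Redis 3.0+ accepts a REPLACE modifier on MIGRATE instead. A minimal sketch, not part of this commit; the destination host here is a placeholder, since MIGRATE requires a different server than the source:

#!/usr/bin/python
# Hypothetical overwrite variant of the cross-server branch above.
import redis

from_redis = redis.Redis(host='192.168.100.40', port=6379, db=11)
to_host, to_port = '192.168.100.41', 6379  # placeholder destination server
for key in from_redis.keys('*'):
    # REPLACE (Redis >= 3.0) overwrites an existing destination key
    # instead of raising the busy-key error the script catches.
    from_redis.execute_command('MIGRATE', to_host, to_port, key, 10, 2000, 'REPLACE')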

View File

@@ -0,0 +1,58 @@
/*
This script will migrate CDRs from the old CGRateS tables to the new cdrs table,
but it only migrates CDRs whose duration is > 0.
If you also need CDRs with duration = 0, make the appropriate change in the line beginning WHERE cdrs_primary.usage
The script commits after every 10,000 CDRs to save system resources,
especially on systems where there are millions of CDRs to be migrated.
You can raise or lower this by changing the value of step in the line after BEGIN below.
Execute the script with 'CALL cgrates.migration();'. If your database is named something other than the default, use that name in place of cgrates.
*/
DELIMITER //
CREATE PROCEDURE `migration`()
BEGIN
/* DECLARE variables */
DECLARE max_cdrs bigint;
DECLARE start_id bigint;
DECLARE end_id bigint;
DECLARE step bigint;
/* Optimize table for performance */
ALTER TABLE cdrs DISABLE KEYS;
SET autocommit=0;
SET unique_checks=0;
SET foreign_key_checks=0;
/* You must change the step var to commit every step rows inserted */
SET step := 10000;
SET start_id := 0;
SET end_id := start_id + step;
SET max_cdrs = (select max(id) from rated_cdrs);
WHILE (start_id <= max_cdrs) DO
INSERT INTO
cdrs(cgrid,run_id,origin_host,source,origin_id,tor,request_type,direction,tenant,category,account,subject,destination,setup_time,pdd,answer_time,`usage`,supplier,disconnect_cause,extra_fields,cost_source,cost,cost_details,extra_info, created_at, updated_at, deleted_at)
SELECT cdrs_primary.cgrid,rated_cdrs.runid as run_id,cdrs_primary.cdrhost as origin_host,cdrs_primary.cdrsource as source,cdrs_primary.accid as origin_id, cdrs_primary.tor,rated_cdrs.reqtype as request_type,rated_cdrs.direction, rated_cdrs.tenant,rated_cdrs.category, rated_cdrs.account, rated_cdrs.subject, rated_cdrs.destination,rated_cdrs.setup_time,rated_cdrs.pdd,rated_cdrs.answer_time,rated_cdrs.`usage`,rated_cdrs.supplier,rated_cdrs.disconnect_cause,cdrs_extra.extra_fields,cost_details.cost_source,rated_cdrs.cost,cost_details.timespans as cost_details,rated_cdrs.extra_info,rated_cdrs.created_at,rated_cdrs.updated_at, rated_cdrs.deleted_at
FROM rated_cdrs
INNER JOIN cdrs_primary ON rated_cdrs.cgrid = cdrs_primary.cgrid
INNER JOIN cdrs_extra ON rated_cdrs.cgrid = cdrs_extra.cgrid
INNER JOIN cost_details ON rated_cdrs.cgrid = cost_details.cgrid
WHERE cdrs_primary.`usage` > '0'
AND not exists (select 1 from cdrs where cdrs.cgrid = cdrs_primary.cgrid AND cdrs.run_id=rated_cdrs.runid)
AND rated_cdrs.id >= start_id
AND rated_cdrs.id < end_id
GROUP BY cgrid, run_id, origin_id;
SET start_id = start_id + step;
SET end_id = end_id + step;
END WHILE;
/* SET Table for live usage */
SET autocommit=1;
SET unique_checks=1;
SET foreign_key_checks=1;
ALTER TABLE cdrs ENABLE KEYS;
OPTIMIZE TABLE cdrs;
END //
DELIMITER ;
CALL cgrates.migration();
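To drive this from Python rather than the mysql client, mysql.connector exposes stored procedures through callproc. A minimal sketch, not part of this commit; host and credentials are placeholders matching the other scripts in it:

#!/usr/bin/python
# Hypothetical wrapper around the procedure defined above.
import mysql.connector

cnx = mysql.connector.connect(host='127.0.0.1', user='cgrates',
                              password='CGRateS.org', database='cgrates')
cursor = cnx.cursor()
cursor.callproc('migration')  # same as: CALL cgrates.migration();
cnx.commit()  # the procedure toggles autocommit itself; this is a safety net
cnx.close()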

View File

@@ -0,0 +1,74 @@
use cgrates;
--
-- Table structure for table `cdrs`
--
DROP TABLE IF EXISTS cdrs;
CREATE TABLE cdrs (
id int(11) NOT NULL AUTO_INCREMENT,
cgrid char(40) NOT NULL,
run_id varchar(64) NOT NULL,
origin_host varchar(64) NOT NULL,
source varchar(64) NOT NULL,
origin_id varchar(64) NOT NULL,
tor varchar(16) NOT NULL,
request_type varchar(24) NOT NULL,
direction varchar(8) NOT NULL,
tenant varchar(64) NOT NULL,
category varchar(32) NOT NULL,
account varchar(128) NOT NULL,
subject varchar(128) NOT NULL,
destination varchar(128) NOT NULL,
setup_time datetime NOT NULL,
pdd DECIMAL(12,9) NOT NULL,
answer_time datetime NOT NULL,
`usage` DECIMAL(30,9) NOT NULL,
supplier varchar(128) NOT NULL,
disconnect_cause varchar(64) NOT NULL,
extra_fields text NOT NULL,
cost_source varchar(64) NOT NULL,
cost DECIMAL(20,4) NOT NULL,
cost_details text,
account_summary text,
extra_info text,
created_at TIMESTAMP NULL,
updated_at TIMESTAMP NULL,
deleted_at TIMESTAMP NULL,
PRIMARY KEY (id),
UNIQUE KEY cdrrun (cgrid, run_id, origin_id)
);
DROP TABLE IF EXISTS sm_costs;
CREATE TABLE sm_costs (
id int(11) NOT NULL AUTO_INCREMENT,
cgrid char(40) NOT NULL,
run_id varchar(64) NOT NULL,
origin_host varchar(64) NOT NULL,
origin_id varchar(64) NOT NULL,
cost_source varchar(64) NOT NULL,
`usage` DECIMAL(30,9) NOT NULL,
cost_details text,
created_at TIMESTAMP NULL,
deleted_at TIMESTAMP NULL,
PRIMARY KEY (`id`),
UNIQUE KEY costid (cgrid, run_id),
KEY origin_idx (origin_host, origin_id),
KEY run_origin_idx (run_id, origin_id),
KEY deleted_at_idx (deleted_at)
);
--
-- Table structure for table `versions`
--
DROP TABLE IF EXISTS versions;
CREATE TABLE versions (
`id` int(11) NOT NULL AUTO_INCREMENT,
`item` varchar(64) NOT NULL,
`version` int(11) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `item` (`item`)
);

View File

@@ -0,0 +1,43 @@
/*
This script will migrate CDRs from the old CGRateS tables to the new cdrs table,
but it only migrates CDRs whose duration is > 0.
If you also need CDRs with duration = 0, make the appropriate change in the line beginning WHERE cdrs_primary.usage
The script processes 10,000 CDRs per batch to save system resources,
especially on systems where there are millions of CDRs to be migrated.
You can raise or lower this by changing the value of step in the line after BEGIN below.
*/
DO $$
DECLARE
max_cdrs bigint;
start_id bigint;
end_id bigint;
step bigint;
BEGIN
/* You must change the step var to commit every step rows inserted */
step := 10000;
start_id := 0;
end_id := start_id + step;
select max(id) INTO max_cdrs from rated_cdrs;
WHILE start_id <= max_cdrs
LOOP
--RAISE NOTICE '%', (to_char(start_id, '99999999') || '-' || to_char(end_id, '99999999'));
INSERT INTO
cdrs(cgrid,run_id,origin_host,source,origin_id,tor,request_type,direction,tenant,category,account,subject,destination,setup_time,pdd,answer_time,usage,supplier,disconnect_cause,extra_fields,cost_source,cost,cost_details,extra_info, created_at, updated_at, deleted_at)
SELECT cdrs_primary.cgrid,rated_cdrs.runid as run_id,cdrs_primary.cdrhost as origin_host,cdrs_primary.cdrsource as source,cdrs_primary.accid as origin_id, cdrs_primary.tor,rated_cdrs.reqtype as request_type,rated_cdrs.direction, rated_cdrs.tenant,rated_cdrs.category, rated_cdrs.account, rated_cdrs.subject, rated_cdrs.destination,rated_cdrs.setup_time,rated_cdrs.pdd,rated_cdrs.answer_time,rated_cdrs.usage,rated_cdrs.supplier,rated_cdrs.disconnect_cause,cdrs_extra.extra_fields,cost_details.cost_source,rated_cdrs.cost,cost_details.timespans as cost_details,rated_cdrs.extra_info,rated_cdrs.created_at,rated_cdrs.updated_at, rated_cdrs.deleted_at
FROM rated_cdrs
INNER JOIN cdrs_primary ON rated_cdrs.cgrid = cdrs_primary.cgrid
INNER JOIN cdrs_extra ON rated_cdrs.cgrid = cdrs_extra.cgrid
INNER JOIN cost_details ON rated_cdrs.cgrid = cost_details.cgrid
WHERE cdrs_primary.usage > '0'
AND not exists (select 1 from cdrs c where c.cgrid = cdrs_primary.cgrid)
AND rated_cdrs.id >= start_id
AND rated_cdrs.id < end_id
;
start_id = start_id + step;
end_id = end_id + step;
END LOOP;
END
$$;
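One behavioural difference from the MySQL procedure above is worth noting: a PostgreSQL DO block executes inside a single transaction, so the step batching here bounds the size of each INSERT but does not commit incrementally. A minimal sketch, not part of this commit, of running it from Python, reusing the pg_cdr_migration.sql filename that the setup script below references; credentials are placeholders:

#!/usr/bin/python
# Hypothetical runner for the DO block above.
import psycopg2

cnx = psycopg2.connect(host='127.0.0.1', dbname='cgrates',
                       user='cgrates', password='CGRateS.org')
cursor = cnx.cursor()
cursor.execute(open('pg_cdr_migration.sql').read())  # the whole DO $$ ... $$; block
cnx.commit()
cnx.close()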

View File

@@ -0,0 +1,79 @@
--
-- Table structure for table `cdrs`
--
DROP TABLE IF EXISTS cdrs;
CREATE TABLE cdrs (
id SERIAL PRIMARY KEY,
cgrid CHAR(40) NOT NULL,
run_id VARCHAR(64) NOT NULL,
origin_host VARCHAR(64) NOT NULL,
source VARCHAR(64) NOT NULL,
origin_id VARCHAR(64) NOT NULL,
tor VARCHAR(16) NOT NULL,
request_type VARCHAR(24) NOT NULL,
direction VARCHAR(8) NOT NULL,
tenant VARCHAR(64) NOT NULL,
category VARCHAR(32) NOT NULL,
account VARCHAR(128) NOT NULL,
subject VARCHAR(128) NOT NULL,
destination VARCHAR(128) NOT NULL,
setup_time TIMESTAMP WITH TIME ZONE NOT NULL,
pdd NUMERIC(12,9) NOT NULL,
answer_time TIMESTAMP WITH TIME ZONE NOT NULL,
usage NUMERIC(30,9) NOT NULL,
supplier VARCHAR(128) NOT NULL,
disconnect_cause VARCHAR(64) NOT NULL,
extra_fields jsonb NOT NULL,
cost_source VARCHAR(64) NOT NULL,
cost NUMERIC(20,4) DEFAULT NULL,
cost_details jsonb,
account_summary jsonb,
extra_info text,
created_at TIMESTAMP WITH TIME ZONE,
updated_at TIMESTAMP WITH TIME ZONE NULL,
deleted_at TIMESTAMP WITH TIME ZONE NULL,
UNIQUE (cgrid, run_id, origin_id)
);
;
DROP INDEX IF EXISTS deleted_at_cp_idx;
CREATE INDEX deleted_at_cp_idx ON cdrs (deleted_at);
DROP TABLE IF EXISTS sm_costs;
CREATE TABLE sm_costs (
id SERIAL PRIMARY KEY,
cgrid CHAR(40) NOT NULL,
run_id VARCHAR(64) NOT NULL,
origin_host VARCHAR(64) NOT NULL,
origin_id VARCHAR(64) NOT NULL,
cost_source VARCHAR(64) NOT NULL,
usage NUMERIC(30,9) NOT NULL,
cost_details jsonb,
created_at TIMESTAMP WITH TIME ZONE,
deleted_at TIMESTAMP WITH TIME ZONE NULL,
UNIQUE (cgrid, run_id)
);
DROP INDEX IF EXISTS cgrid_smcost_idx;
CREATE INDEX cgrid_smcost_idx ON sm_costs (cgrid, run_id);
DROP INDEX IF EXISTS origin_smcost_idx;
CREATE INDEX origin_smcost_idx ON sm_costs (origin_host, origin_id);
DROP INDEX IF EXISTS run_origin_smcost_idx;
CREATE INDEX run_origin_smcost_idx ON sm_costs (run_id, origin_id);
DROP INDEX IF EXISTS deleted_at_smcost_idx;
CREATE INDEX deleted_at_smcost_idx ON sm_costs (deleted_at);
--
-- Table structure for table `versions`
--
DROP TABLE IF EXISTS versions;
CREATE TABLE versions (
"id" SERIAL PRIMARY KEY,
"item" varchar(64) NOT NULL,
"version" INTEGER NOT NULL,
UNIQUE (item)
);

View File

@@ -0,0 +1,56 @@
#! /usr/bin/env sh
if test $# -lt 2; then
echo ""
echo "setup_cgr_db.sh <datadb_name> <stordb_name> <User> <host>"
echo ""
exit 0
fi
user=$3
if [ -z "$3" ]; then
user="cgrates"
fi
host=$4
if [ -z "$4" ]; then
host="localhost"
fi
export PGPASSWORD="CGRateS.org"
DIR="$(dirname "$(readlink -f "$0")")"
case $1 in
"redis")
./dbsmerge_redis.py
;;
"mongo")
./dbsmerge_mongo.py
;;
esac
case $2 in
"mysql")
mysql -u$user -p$PGPASSWORD -h $host < "$DIR"/mysql_tables_update.sql
up=$?
mysql -u$user -p$PGPASSWORD -h $host -D cgrates < "$DIR"/mysql_cdr_migration.sql
mig=$?
#./usage_mysql.py
;;
"postgres")
psql -U $user -h $host -d cgrates -f "$DIR"/pq_tables_update.sql
up=$?
psql -U $user -h $host -d cgrates -f "$DIR"/pg_cdr_migration.sql
mig=$?
#./usage_postgres.py
;;
esac
if [ "$up" = 0 ] && [ "$mig" = 0 ]; then
echo -e "\n\t+++ CGR-DB successfully set-up! +++\n"
exit 0
fi

View File

@@ -0,0 +1,40 @@
#!/usr/bin/python
# depends:
# ^ mysql (debian: python-mysql.connector)
host = '192.168.100.40'
port = 3306
database = 'cgrates'
user = 'root'
password = 'CGRateS.org'
import mysql.connector
config = {
'user': user,
'password': password,
'host': host,
'port': port,
'database': database,
}
print('Connecting to MySQL...')
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
print('Renaming old column...')
cursor.execute('ALTER TABLE cdrs CHANGE COLUMN `usage` `usage_old` DECIMAL(30,9)')
print('Adding new column...')
cursor.execute('ALTER TABLE cdrs ADD `usage` DECIMAL(30)')
print('Setting new values...')
cursor.execute('UPDATE cdrs SET `usage` = `usage_old` * 1000000000 WHERE usage_old IS NOT NULL')
print('Deleting old column...')
cursor.execute('ALTER TABLE cdrs DROP COLUMN usage_old')
print('Committing...')
cnx.commit()  # mysql.connector does not autocommit by default
print('Closing MySQL connection...')
cnx.close()
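Both this script and the PostgreSQL variant below apply the same conversion: usage moves from seconds stored as DECIMAL(30,9) to integer nanoseconds, hence the factor of 1000000000. A two-line illustration, not part of the commit:

#!/usr/bin/python
seconds = 61.5                    # old `usage` value (DECIMAL(30,9) seconds)
print(int(seconds * 1000000000))  # new `usage` value: 61500000000 ns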

View File

@@ -0,0 +1,40 @@
#!/usr/bin/python
# depends:
# ^ psycopg2 (debian: python-psycopg2)
host = '127.0.0.1'
port = 5432
database = 'cgrates'
user = 'cgrates'
password = 'CGRateS.org'
import psycopg2
print('Connecting to PostgreSQL...')
cnx = psycopg2.connect(
host=host,
port=port,
dbname=database,
user=user,
password=password
)
cursor = cnx.cursor()
print('Renaming old column...')
cursor.execute('ALTER TABLE cdrs RENAME COLUMN usage to usage_old')
print('Adding new column...')
cursor.execute('ALTER TABLE cdrs ADD usage NUMERIC(30)')
print('Setting new values...')
cursor.execute('UPDATE cdrs SET usage = usage_old * 1000000000 WHERE usage_old IS NOT NULL')
print('Deleting old column...')
cursor.execute('ALTER TABLE cdrs DROP COLUMN usage_old')
print('Committing...')
cnx.commit()
print('Closing PostgreSQL connection...')
cnx.close()