StorDB tp_resource_limits -> tp_resources

This commit is contained in:
DanB
2017-08-15 18:48:38 +02:00
parent 273a4c32eb
commit e42c46b90f
8 changed files with 22 additions and 22 deletions

View File

@@ -392,11 +392,11 @@ CREATE TABLE tp_aliases (
);
--
--- Table structure for table `tp_resource_limits`
+-- Table structure for table `tp_resources`
--
-DROP TABLE IF EXISTS tp_resource_limits;
-CREATE TABLE tp_resource_limits (
+DROP TABLE IF EXISTS tp_resources;
+CREATE TABLE tp_resources (
`id` int(11) NOT NULL AUTO_INCREMENT,
`tpid` varchar(64) NOT NULL,
`tag` varchar(64) NOT NULL,
@@ -414,7 +414,7 @@ CREATE TABLE tp_resource_limits (
`created_at` TIMESTAMP,
PRIMARY KEY (`id`),
KEY `tpid` (`tpid`),
-UNIQUE KEY `unique_tp_resource_limits` (`tpid`, `tag`, `filter_type`, `filter_field_name`)
+UNIQUE KEY `unique_tp_resource` (`tpid`, `tag`, `filter_type`, `filter_field_name`)
);
--

View File

@@ -388,11 +388,11 @@ CREATE INDEX tpaliases_idx ON tp_aliases (tpid,direction,tenant,category,account
--
--- Table structure for table `tp_resource_limits`
+-- Table structure for table `tp_resources`
--
-DROP TABLE IF EXISTS tp_resource_limits;
-CREATE TABLE tp_resource_limits (
+DROP TABLE IF EXISTS tp_resources;
+CREATE TABLE tp_resources (
"id" SERIAL PRIMARY KEY,
"tpid" varchar(64) NOT NULL,
"tag" varchar(64) NOT NULL,
@@ -409,8 +409,8 @@ CREATE TABLE tp_resource_limits (
"thresholds" varchar(64) NOT NULL,
"created_at" TIMESTAMP WITH TIME ZONE
);
-CREATE INDEX tp_resource_limits_idx ON tp_resource_limits (tpid);
-CREATE INDEX tp_resource_limits_unique ON tp_resource_limits ("tpid", "tag", "filter_type", "filter_field_name");
+CREATE INDEX tp_resources_idx ON tp_resources (tpid);
+CREATE INDEX tp_resources_unique ON tp_resources ("tpid", "tag", "filter_type", "filter_field_name");
--

View File

@@ -1811,7 +1811,7 @@ func APItoModelLcrRules(ts []*utils.TPLcrRules) (result TpLcrRules) {
return result
}
-type TpResourceLimits []*TpResourceLimit
+type TpResourceLimits []*TpResource
func (tps TpResourceLimits) AsTPResourceLimits() (result []*utils.TPResourceLimit) {
mrl := make(map[string]*utils.TPResourceLimit)
@@ -1875,7 +1875,7 @@ func APItoModelResourceLimit(rl *utils.TPResourceLimit) (mdls TpResourceLimits)
return
}
for i, fltr := range rl.Filters {
-mdl := &TpResourceLimit{
+mdl := &TpResource{
Tpid: rl.TPid,
Tag: rl.ID,
}

View File

@@ -667,9 +667,9 @@ func TestTPAccountActionsAsExportSlice(t *testing.T) {
}
}
-func TestTpResourceLimitsAsTPResourceLimits(t *testing.T) {
-tps := []*TpResourceLimit{
-&TpResourceLimit{
+func TestTpResourcesAsTpResources(t *testing.T) {
+tps := []*TpResource{
+&TpResource{
Tpid: "TEST_TPID",
Tag: "ResGroup1",
FilterType: MetaStringPrefix,
@@ -681,14 +681,14 @@ func TestTpResourceLimitsAsTPResourceLimits(t *testing.T) {
Weight: 10.0,
Limit: "45",
Thresholds: "WARN_RES1;WARN_RES2"},
-&TpResourceLimit{
+&TpResource{
Tpid: "TEST_TPID",
Tag: "ResGroup1",
FilterType: MetaStringPrefix,
FilterFieldName: "Category",
FilterFieldValues: "call;inbound_call",
Thresholds: "WARN3"},
-&TpResourceLimit{
+&TpResource{
Tpid: "TEST_TPID",
Tag: "ResGroup2",
FilterType: MetaStringPrefix,

View File

@@ -450,7 +450,7 @@ func (t TBLSMCosts) TableName() string {
return utils.TBLSMCosts
}
-type TpResourceLimit struct {
+type TpResource struct {
ID int64
Tpid string
Tag string `index:"0" re:""`

View File

@@ -593,7 +593,7 @@ func (csvs *CSVStorage) GetTPAliases(filter *utils.TPAliases) ([]*utils.TPAliase
}
func (csvs *CSVStorage) GetTPResourceLimits(tpid, id string) ([]*utils.TPResourceLimit, error) {
-csvReader, fp, err := csvs.readerFunc(csvs.resLimitsFn, csvs.sep, getColumnCount(TpResourceLimit{}))
+csvReader, fp, err := csvs.readerFunc(csvs.resLimitsFn, csvs.sep, getColumnCount(TpResource{}))
if err != nil {
//log.Print("Could not load resource limits file: ", err)
// allow writing of the other values
@@ -608,11 +608,11 @@ func (csvs *CSVStorage) GetTPResourceLimits(tpid, id string) ([]*utils.TPResourc
log.Print("bad line in resourcelimits csv: ", err)
return nil, err
}
-if tpResLimit, err := csvLoad(TpResourceLimit{}, record); err != nil {
+if tpResLimit, err := csvLoad(TpResource{}, record); err != nil {
log.Print("error loading resourcelimit: ", err)
return nil, err
} else {
-tpLimit := tpResLimit.(TpResourceLimit)
+tpLimit := tpResLimit.(TpResource)
tpLimit.Tpid = tpid
tpResLimits = append(tpResLimits, &tpLimit)
}

View File

@@ -565,7 +565,7 @@ func (self *SQLStorage) SetTPResourceLimits(rls []*utils.TPResourceLimit) error
tx := self.db.Begin()
for _, rl := range rls {
// Remove previous
-if err := tx.Where(&TpResourceLimit{Tpid: rl.TPid, Tag: rl.ID}).Delete(TpResourceLimit{}).Error; err != nil {
+if err := tx.Where(&TpResource{Tpid: rl.TPid, Tag: rl.ID}).Delete(TpResource{}).Error; err != nil {
tx.Rollback()
return err
}

View File

@@ -99,7 +99,7 @@ const (
TBLTPDerivedChargers = "tp_derived_chargers"
TBLTPUsers = "tp_users"
TBLTPAliases = "tp_aliases"
-TBLTPResourceLimits = "tp_resource_limits"
+TBLTPResourceLimits = "tp_resources"
TBLTPStats = "tp_stats"
TBLTPThresholds = "tp_thresholds"
TBLSMCosts = "sm_costs"