Updated dispatcher *ratio handling
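
Hosts with a *ratio of 0 are now excluded from dispatching entirely, and
hosts with a negative *ratio always sort first (their load multiple is
pinned to 0 regardless of accumulated load). The old clamping of
non-positive ratios to 1 is removed from both the default-ratio and the
per-host *ratio parsing.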

Trial97
2020-08-19 11:33:15 +03:00
committed by Dan Christian Bogos
parent 1026b48119
commit f175e62096
3 changed files with 42 additions and 14 deletions

View File

@@ -280,9 +280,6 @@ func newSingleStrategyDispatcher(hosts engine.DispatcherHostProfiles, params map
 		if ratio, err = utils.IfaceAsTInt64(dflt); err != nil {
 			return nil, err
 		}
-		if ratio <= 0 {
-			ratio = 1
-		}
 		return &loadStrategyDispatcher{
 			tntID:        tntID,
 			hosts:        hosts.Clone(),
@@ -318,9 +315,6 @@ func newLoadMetrics(hosts engine.DispatcherHostProfiles, dfltRatio int64) (*Load
 		} else if ratio, err := utils.IfaceAsTInt64(strRatio); err != nil {
 			return nil, err
 		} else {
-			if ratio <= 0 {
-				ratio = 1
-			}
 			lM.HostsRatio[host.ID] = ratio
 		}
 	}
@@ -399,12 +393,21 @@ func (hc *hostCosts) Swap(i, j int) {
 func (lM *LoadMetrics) getHosts(hostIDs []string) []string {
 	hlp := &hostCosts{
-		ids:      hostIDs,
-		multiple: make([]int64, len(hostIDs)),
+		ids:      make([]string, 0, len(hostIDs)),
+		multiple: make([]int64, 0, len(hostIDs)),
 	}
 	lM.mutex.RLock()
-	for i, id := range hostIDs {
-		hlp.multiple[i] = lM.HostsLoad[id] / lM.HostsRatio[id]
+	for _, id := range hostIDs {
+		switch {
+		case lM.HostsRatio[id] < 0:
+			hlp.multiple = append(hlp.multiple, 0)
+		case lM.HostsRatio[id] == 0:
+			continue
+		default:
+			hlp.multiple = append(hlp.multiple, lM.HostsLoad[id]/lM.HostsRatio[id])
+		}
+		hlp.ids = append(hlp.ids, id)
 	}
 	lM.mutex.RUnlock()
 	sort.Stable(hlp)
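
For reference, a minimal self-contained sketch of the new semantics (the
loadMetrics and hostCost types below are simplified stand-ins for the
CGRateS structs, with the locking omitted): a negative ratio pins a host's
load multiple to 0 so it always sorts as least loaded, a ratio of 0 drops
the host from the result entirely, and a positive ratio divides the host's
load as before.

package main

import (
	"fmt"
	"sort"
)

// loadMetrics is a simplified stand-in for the LoadMetrics struct.
type loadMetrics struct {
	hostsLoad  map[string]int64 // accumulated load per host
	hostsRatio map[string]int64 // configured *ratio per host
}

type hostCost struct {
	id       string
	multiple int64 // load divided by ratio; lower sorts first
}

// getHosts mirrors the patched LoadMetrics.getHosts.
func (lm *loadMetrics) getHosts(hostIDs []string) []string {
	costs := make([]hostCost, 0, len(hostIDs))
	for _, id := range hostIDs {
		switch {
		case lm.hostsRatio[id] < 0:
			costs = append(costs, hostCost{id, 0}) // never counts as loaded
		case lm.hostsRatio[id] == 0:
			continue // ratio 0: host is excluded from dispatching
		default:
			costs = append(costs, hostCost{id, lm.hostsLoad[id] / lm.hostsRatio[id]})
		}
	}
	sort.SliceStable(costs, func(i, j int) bool {
		return costs[i].multiple < costs[j].multiple
	})
	ids := make([]string, len(costs))
	for i, c := range costs {
		ids[i] = c.id
	}
	return ids
}

func main() {
	lm := &loadMetrics{
		hostsLoad:  map[string]int64{"DSP_1": 100, "DSP_2": 6, "DSP_3": 2},
		hostsRatio: map[string]int64{"DSP_1": -1, "DSP_2": 3, "DSP_3": 0},
	}
	// DSP_3 (ratio 0) is dropped; DSP_1 (ratio -1) sorts first despite its load.
	fmt.Println(lm.getHosts([]string{"DSP_1", "DSP_2", "DSP_3"})) // [DSP_1 DSP_2]
}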

View File

@@ -114,7 +114,7 @@ func TestNewSingleStrategyDispatcher(t *testing.T) {
 	exp = &loadStrategyDispatcher{
 		hosts:        dhp,
 		tntID:        "cgrates.org",
-		defaultRatio: 1,
+		defaultRatio: 0,
 	}
 	if rply, err := newSingleStrategyDispatcher(dhp, map[string]interface{}{utils.MetaDefaultRatio: 0}, "cgrates.org"); err != nil {
 		t.Fatal(err)
@@ -137,7 +137,7 @@ func TestNewLoadMetrics(t *testing.T) {
 		HostsLoad: map[string]int64{},
 		HostsRatio: map[string]int64{
 			"DSP_1": 1,
-			"DSP_2": 1,
+			"DSP_2": 0,
 			"DSP_3": 2,
 		},
 	}
@@ -161,13 +161,14 @@ func TestLoadMetricsGetHosts2(t *testing.T) {
{ID: "DSP_3", Params: map[string]interface{}{utils.MetaRatio: 1}},
{ID: "DSP_4", Params: map[string]interface{}{utils.MetaRatio: 5}},
{ID: "DSP_5", Params: map[string]interface{}{utils.MetaRatio: 1}},
{ID: "DSP_6", Params: map[string]interface{}{utils.MetaRatio: 0}},
}
lm, err := newLoadMetrics(dhp, 1)
if err != nil {
t.Fatal(err)
}
hostsIDs := engine.DispatcherHostIDs(dhp.HostIDs())
exp := []string(hostsIDs.Clone())
exp := []string(hostsIDs.Clone())[:5]
if rply := lm.getHosts(hostsIDs.Clone()); !reflect.DeepEqual(exp, rply) {
t.Errorf("Expected: %+v ,received: %+v", exp, rply)
}
@@ -209,4 +210,28 @@ func TestLoadMetricsGetHosts2(t *testing.T) {
t.Errorf("Expected: %+v ,received: %+v", exp, rply)
}
}
dhp = engine.DispatcherHostProfiles{
{ID: "DSP_1", Params: map[string]interface{}{utils.MetaRatio: -1}},
{ID: "DSP_2", Params: map[string]interface{}{utils.MetaRatio: 3}},
{ID: "DSP_3", Params: map[string]interface{}{utils.MetaRatio: 1}},
{ID: "DSP_4", Params: map[string]interface{}{utils.MetaRatio: 5}},
{ID: "DSP_5", Params: map[string]interface{}{utils.MetaRatio: 1}},
{ID: "DSP_6", Params: map[string]interface{}{utils.MetaRatio: 0}},
}
lm, err = newLoadMetrics(dhp, 1)
if err != nil {
t.Fatal(err)
}
hostsIDs = engine.DispatcherHostIDs(dhp.HostIDs())
exp = []string(hostsIDs.Clone())[:5]
if rply := lm.getHosts(hostsIDs.Clone()); !reflect.DeepEqual(exp, rply) {
t.Errorf("Expected: %+v ,received: %+v", exp, rply)
}
for i := 0; i < 100; i++ {
if rply := lm.getHosts(hostsIDs.Clone()); !reflect.DeepEqual(exp, rply) {
t.Errorf("Expected: %+v ,received: %+v", exp, rply)
}
lm.incrementLoad(exp[0], utils.EmptyString)
}
}
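
The appended test case covers both new behaviors: DSP_6 with *ratio 0 is
dropped from the expected host list (hence the [:5] slice), and DSP_1 with
*ratio -1 keeps sorting first across 100 load increments, since its
multiple stays pinned at 0.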

View File

@@ -123,7 +123,7 @@ func testLoaderITInitStoreDB(t *testing.T) {
 		lCfg.StorDbCfg().ConnMaxLifetime, lCfg.StorDbCfg().StringIndexedFields,
 		lCfg.StorDbCfg().PrefixIndexedFields, lCfg.StorDbCfg().Items)
 	if err != nil {
-		t.Error("Error on opening database connection: ", err)
+		t.Fatal("Error on opening database connection: ", err)
 	}
 	storDb = db
 	// Creating the table serves also as reset since there is a drop prior to create
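
Switching to t.Fatal makes the test abort immediately when the connection
fails, so the statements below never run against a nil storDb; t.Error
would only record the failure and continue executing.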