Polish some details (#63)

* Polish some details

* Rename linkedlist file and make it look more like Go style, not Java
This commit is contained in:
陈键冬 2020-04-06 16:35:51 +08:00 committed by GitHub
parent 59f2bb003b
commit 73ed8a66d3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 36 additions and 42 deletions

View File

@ -53,4 +53,3 @@ type AlertUpgrade struct {
Duration int `json:"duration"`
Level int `json:"level"`
}

View File

@ -14,7 +14,7 @@ type CounterTsMap struct {
}
func NewCounterTsMap() *CounterTsMap {
return &CounterTsMap{M: make(map[string]int64, 0)}
return &CounterTsMap{M: make(map[string]int64)}
}
func (c *CounterTsMap) Set(counter string, ts int64) {

View File

@ -37,8 +37,6 @@ func (e *EndpointIndexMap) Push(item dataobj.IndexModel, now int64) {
return
}
metricIndex.Set(item, counter, now)
return
}
func (e *EndpointIndexMap) Clean(timeDuration int64) {
@ -123,7 +121,7 @@ func (e *EndpointIndexMap) GetEndpoints() []string {
length := len(e.M)
ret := make([]string, length)
i := 0
for endpoint, _ := range e.M {
for endpoint := range e.M {
ret[i] = endpoint
i++
}

View File

@ -69,6 +69,4 @@ func reportEndpoint(endpoints []interface{}) {
}
time.Sleep(100 * time.Millisecond)
}
return
}

View File

@ -41,7 +41,7 @@ var semaPermanence = semaphore.NewSemaphore(1)
func InitDB(cfg CacheSection) {
Config = cfg
IndexDB = &EndpointIndexMap{M: make(map[string]*MetricIndexMap, 0)}
IndexDB = &EndpointIndexMap{M: make(map[string]*MetricIndexMap)}
NewEndpoints = list.NewSafeListLimited(100000)
Rebuild(Config.PersistDir, Config.RebuildWorker)

View File

@ -78,7 +78,6 @@ func (m *MetricIndexMap) DelMetric(metric string) {
m.Lock()
defer m.Unlock()
delete(m.Data, metric)
return
}
func (m *MetricIndexMap) Len() int {
@ -106,7 +105,7 @@ func (m *MetricIndexMap) GetMetrics() []string {
m.RLock()
defer m.RUnlock()
var metrics []string
for k, _ := range m.Data {
for k := range m.Data {
metrics = append(metrics, k)
}
return metrics

View File

@ -16,8 +16,9 @@ func (t TagPairs) Len() int {
}
func (t TagPairs) Less(i, j int) bool {
return t[i].Key > t[i].Key
return t[i].Key > t[j].Key
}
func (t TagPairs) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
}
@ -135,7 +136,7 @@ func TagPairToMap(tagPairs []*TagPair) map[string][]string {
func GetSortTags(tagMap map[string][]string) []*TagPair {
var keys []string
for key, _ := range tagMap {
for key := range tagMap {
keys = append(keys, key)
}
sort.Strings(keys)

View File

@ -33,7 +33,7 @@ func (t *TagkvIndex) GetTagkv() []*TagPair {
for k, vm := range t.Tagkv {
var vs []string
for v, _ := range vm {
for v := range vm {
vs = append(vs, v)
}
tagkv := TagPair{
@ -53,7 +53,7 @@ func (t *TagkvIndex) GetTagkvMap() map[string][]string {
for k, vm := range t.Tagkv {
var vs []string
for v, _ := range vm {
for v := range vm {
vs = append(vs, v)
}

View File

@ -125,7 +125,7 @@ func GetTagPairs(c *gin.Context) {
for tagk, tagvFilter := range tagkvFilter {
tagvs := []string{}
for v, _ := range tagvFilter {
for v := range tagvFilter {
tagvs = append(tagvs, v)
}
tagkv := &cache.TagPair{
@ -305,7 +305,7 @@ func GetIndexByClude(c *gin.Context) {
var err error
var tags []string
if len(includeList) == 0 && len(excludeList) == 0 {
for counter, _ := range counterMap {
for counter := range counterMap {
tagList = append(tagList, counter)
}
resp = append(resp, XcludeResp{

View File

@ -42,5 +42,4 @@ func push(args []*dataobj.IndexModel, reply *dataobj.IndexResp) {
reply.Total = len(args)
reply.Latency = (time.Now().UnixNano() - start.UnixNano()) / 1000000
return
}

View File

@ -30,7 +30,7 @@ type IndexRequest struct {
type Counter struct {
Counter string `json:"counter"`
Step int `json:"step"`
Dstype string `json:"dstype`
Dstype string `json:"dstype"`
}
// 执行Query操作

View File

@ -12,32 +12,32 @@ type SafeLinkedList struct {
L *list.List
}
func (this *SafeLinkedList) Front() *list.Element {
this.RLock()
defer this.RUnlock()
return this.L.Front()
func (ll *SafeLinkedList) Front() *list.Element {
ll.RLock()
defer ll.RUnlock()
return ll.L.Front()
}
func (this *SafeLinkedList) Len() int {
this.RLock()
defer this.RUnlock()
return this.L.Len()
func (ll *SafeLinkedList) Len() int {
ll.RLock()
defer ll.RUnlock()
return ll.L.Len()
}
// @return needJudge 如果是false不需要做judge因为新上来的数据不合法
func (this *SafeLinkedList) PushFrontAndMaintain(v *dataobj.JudgeItem, maxCount int) bool {
this.Lock()
defer this.Unlock()
func (ll *SafeLinkedList) PushFrontAndMaintain(v *dataobj.JudgeItem, maxCount int) bool {
ll.Lock()
defer ll.Unlock()
sz := this.L.Len()
sz := ll.L.Len()
if sz > 0 {
// 新push上来的数据有可能重复了或者timestamp不对这种数据要丢掉
if v.Timestamp <= this.L.Front().Value.(*dataobj.JudgeItem).Timestamp || v.Timestamp <= 0 {
if v.Timestamp <= ll.L.Front().Value.(*dataobj.JudgeItem).Timestamp || v.Timestamp <= 0 {
return false
}
}
this.L.PushFront(v)
ll.L.PushFront(v)
sz++
if sz <= maxCount {
@ -46,7 +46,7 @@ func (this *SafeLinkedList) PushFrontAndMaintain(v *dataobj.JudgeItem, maxCount
del := sz - maxCount
for i := 0; i < del; i++ {
this.L.Remove(this.L.Back())
ll.L.Remove(ll.L.Back())
}
return true
@ -54,19 +54,19 @@ func (this *SafeLinkedList) PushFrontAndMaintain(v *dataobj.JudgeItem, maxCount
// @param limit 至多返回这些,如果不够,有多少返回多少
// @return bool isEnough
func (this *SafeLinkedList) HistoryData(limit int) ([]*dataobj.RRDData, bool) {
func (ll *SafeLinkedList) HistoryData(limit int) ([]*dataobj.RRDData, bool) {
if limit < 1 {
// 其实limit不合法此处也返回false吧上层代码要注意
// 因为false通常使上层代码进入异常分支这样就统一了
return []*dataobj.RRDData{}, false
}
size := this.Len()
size := ll.Len()
if size == 0 {
return []*dataobj.RRDData{}, false
}
firstElement := this.Front()
firstElement := ll.Front()
firstItem := firstElement.Value.(*dataobj.JudgeItem)
var vs []*dataobj.RRDData

View File

@ -89,7 +89,7 @@ func Judge(stra *model.Stra, exps []model.Exp, historyData []*dataobj.RRDData, f
defer func() {
if len(exps) == 1 {
bytes, err := json.Marshal(history)
bs, err := json.Marshal(history)
if err != nil {
logger.Error("Marshal history:%v err:%v", history, err)
}
@ -98,7 +98,7 @@ func Judge(stra *model.Stra, exps []model.Exp, historyData []*dataobj.RRDData, f
Etime: now,
Endpoint: firstItem.Endpoint,
Info: info,
Detail: string(bytes),
Detail: string(bs),
Value: value,
Partition: redi.Config.Prefix + "/event/p" + strconv.Itoa(stra.Priority),
Sid: stra.Id,
@ -134,7 +134,7 @@ func Judge(stra *model.Stra, exps []model.Exp, historyData []*dataobj.RRDData, f
return
}
for i, _ := range respData {
for i := range respData {
firstItem.Endpoint = respData[i].Endpoint
firstItem.Tags = getTags(respData[i].Counter)
firstItem.Step = respData[i].Step
@ -153,7 +153,7 @@ func Judge(stra *model.Stra, exps []model.Exp, historyData []*dataobj.RRDData, f
logger.Errorf("stra:%v get query data err:%v", stra, err)
return
}
for i, _ := range respData {
for i := range respData {
firstItem.Endpoint = respData[i].Endpoint
firstItem.Tags = getTags(respData[i].Counter)
firstItem.Step = respData[i].Step
@ -204,7 +204,7 @@ func judgeItemWithStrategy(stra *model.Stra, historyData []*dataobj.RRDData, exp
var sum float64
data := respItems[0]
for i, _ := range data.Values {
for i := range data.Values {
sum += float64(data.Values[i].Value)
}

View File

@ -18,7 +18,7 @@ func NodataJudge(concurrency int) {
if concurrency < 1 {
concurrency = 1000
}
nodataJob = semaphore.NewSemaphore(1000)
nodataJob = semaphore.NewSemaphore(concurrency)
t1 := time.NewTicker(time.Duration(9000) * time.Millisecond)
nodataJudge()