Lint: Replace += 1 with ++
parent 9ed1ed1003
commit 33fd68778e
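The cleanup is purely mechanical: every bare counter bump written as x += 1 in the Go runtime is rewritten to the equivalent x++ statement, the form Go linters such as golint suggest. As a minimal illustration (not part of this commit; the variable names are invented for the example), the sketch below shows both spellings. Because ++ is a statement in Go rather than an expression, the rewrite cannot change evaluation order or behavior.

package main

import "fmt"

func main() {
	// Form flagged by the linter: an increment spelled as a compound assignment.
	before := 0
	before += 1

	// Form used after this commit: Go's increment statement. Both compile to
	// the same operation; this is a style-only change.
	after := 0
	after++

	fmt.Println(before, after) // prints: 1 1
}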
@@ -549,7 +549,7 @@ func (a *ATNDeserializer) checkCondition(condition bool, message string) {
 
 func (a *ATNDeserializer) readInt() int {
 	v := a.data[a.pos]
-	a.pos += 1
+	a.pos++
 	return int(v)
 }
 
@@ -235,7 +235,7 @@ func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
 		if token.GetTokenType() == TokenEOF {
 			return -1
 		}
-		i += 1
+		i++
 		c.Sync(i)
 		token = c.tokens[i]
 	}
@@ -388,7 +388,7 @@ func (c *CommonTokenStream) LB(k int) Token {
 	for n <= k {
 		// Skip off-channel tokens
 		i = c.previousTokenOnChannel(i-1, c.channel)
-		n += 1
+		n++
 	}
 	if i < 0 {
 		return nil
@@ -412,7 +412,7 @@ func (c *CommonTokenStream) LT(k int) Token {
 		if c.Sync(i + 1) {
 			i = c.NextTokenOnChannel(i+1, c.channel)
 		}
-		n += 1
+		n++
 	}
 	return c.tokens[i]
 }
@@ -424,7 +424,7 @@ func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
 	for i := 0; i < len(c.tokens); i++ {
 		var t = c.tokens[i]
 		if t.GetChannel() == c.channel {
-			n += 1
+			n++
 		}
 		if t.GetTokenType() == TokenEOF {
 			break
@@ -30,7 +30,7 @@ func (is *InputStream) Consume() {
 		// assert is.LA(1) == TokenEOF
 		panic("cannot consume EOF")
 	}
-	is.index += 1
+	is.index++
 }
 
 func (is *InputStream) LA(offset int) int {
@@ -39,7 +39,7 @@ func (is *InputStream) LA(offset int) int {
 		return 0 // nil
 	}
 	if offset < 0 {
-		offset += 1 // e.g., translate LA(-1) to use offset=0
+		offset++ // e.g., translate LA(-1) to use offset=0
 	}
 	var pos = is.index + offset - 1
 
@@ -186,7 +186,7 @@ func (i *IntervalSet) removeRange(v *Interval) {
 			} else if v.stop < ni.stop {
 				i.intervals[k] = NewInterval(v.stop, ni.stop)
 			}
-			k += 1
+			k++
 		}
 	}
 }
@@ -109,7 +109,7 @@ func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
 		fmt.Println("Match")
 	}
 
-	l.Match_calls += 1
+	l.Match_calls++
 	l.mode = mode
 	var mark = input.Mark()
 
@@ -672,10 +672,10 @@ func (l *LexerATNSimulator) GetText(input CharStream) string {
 func (l *LexerATNSimulator) consume(input CharStream) {
 	var curChar = input.LA(1)
 	if curChar == int('\n') {
-		l.line += 1
+		l.line++
 		l.column = 0
 	} else {
-		l.column += 1
+		l.column++
 	}
 	input.Consume()
 }
@@ -418,7 +418,7 @@ func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err
 	if offendingToken == nil {
 		offendingToken = p.GetCurrentToken()
 	}
-	p._SyntaxErrors += 1
+	p._SyntaxErrors++
 	var line = offendingToken.GetLine()
 	var column = offendingToken.GetColumn()
 	listener := p.GetErrorListenerDispatch()
@@ -796,7 +796,7 @@ func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATN
 		if pred == nil {
 			altToPred[i] = SemanticContextNone
 		} else if pred != SemanticContextNone {
-			nPredAlts += 1
+			nPredAlts++
 		}
 	}
 	// nonambig alts are nil in altToPred
@@ -1112,7 +1112,7 @@ func (p *ParserATNSimulator) closure_(config ATNConfig, configs ATNConfigSet, cl
 			} else if _, ok := t.(*RuleTransition); ok {
 				// latch when newDepth goes negative - once we step out of the entry context we can't return
 				if newDepth >= 0 {
-					newDepth += 1
+					newDepth++
 				}
 			}
 			p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEofAsEpsilon)
@@ -637,31 +637,31 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
 				mergedParents[k] = mergedParent
 				mergedReturnStates[k] = payload
 			}
-			i += 1 // hop over left one as usual
-			j += 1 // but also Skip one in right side since we merge
+			i++ // hop over left one as usual
+			j++ // but also Skip one in right side since we merge
 		} else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
 			mergedParents[k] = a_parent
 			mergedReturnStates[k] = a.returnStates[i]
-			i += 1
+			i++
 		} else { // b > a, copy b[j] to M
 			mergedParents[k] = b_parent
 			mergedReturnStates[k] = b.returnStates[j]
-			j += 1
+			j++
 		}
-		k += 1
+		k++
 	}
 	// copy over any payloads remaining in either array
 	if i < len(a.returnStates) {
 		for p := i; p < len(a.returnStates); p++ {
 			mergedParents[k] = a.parents[p]
 			mergedReturnStates[k] = a.returnStates[p]
-			k += 1
+			k++
 		}
 	} else {
 		for p := j; p < len(b.returnStates); p++ {
 			mergedParents[k] = b.parents[p]
 			mergedReturnStates[k] = b.returnStates[p]
-			k += 1
+			k++
 		}
 	}
 	// trim merged if we combined a few that had same stack tops