live tail fetch only recent 100 logs every 10s
commit 0fe4327877 (parent 4825ed6e5f)
@@ -18,20 +18,21 @@ const (
 )
 
 const (
     defaultDatasource             string        = "tcp://localhost:9000"
     defaultTraceDB                string        = "signoz_traces"
     defaultOperationsTable        string        = "signoz_operations"
     defaultIndexTable             string        = "signoz_index_v2"
     defaultErrorTable             string        = "signoz_error_index_v2"
     defaulDurationTable           string        = "durationSortMV"
     defaultSpansTable             string        = "signoz_spans"
     defaultLogsDB                 string        = "signoz_logs"
     defaultLogsTable              string        = "logs"
     defaultLogAttributeKeysTable  string        = "logs_atrribute_keys"
     defaultLogResourceKeysTable   string        = "logs_resource_keys"
+    defaultLiveTailRefreshSeconds int           = 10
     defaultWriteBatchDelay        time.Duration = 5 * time.Second
     defaultWriteBatchSize         int           = 10000
     defaultEncoding               Encoding      = EncodingJSON
 )
 
 const (
@@ -60,6 +61,7 @@ type namespaceConfig struct {
     LogsTable              string
     LogsAttributeKeysTable string
     LogsResourceKeysTable  string
+    LiveTailRefreshSeconds int
     WriteBatchDelay        time.Duration
     WriteBatchSize         int
     Encoding               Encoding
@@ -123,6 +125,7 @@ func NewOptions(datasource string, primaryNamespace string, otherNamespaces ...s
         LogsTable:              defaultLogsTable,
         LogsAttributeKeysTable: defaultLogAttributeKeysTable,
         LogsResourceKeysTable:  defaultLogResourceKeysTable,
+        LiveTailRefreshSeconds: defaultLiveTailRefreshSeconds,
         WriteBatchDelay:        defaultWriteBatchDelay,
         WriteBatchSize:         defaultWriteBatchSize,
         Encoding:               defaultEncoding,
@@ -144,6 +147,7 @@ func NewOptions(datasource string, primaryNamespace string, otherNamespaces ...s
         LogsTable:              "",
         LogsAttributeKeysTable: "",
         LogsResourceKeysTable:  "",
+        LiveTailRefreshSeconds: defaultLiveTailRefreshSeconds,
         WriteBatchDelay:        defaultWriteBatchDelay,
         WriteBatchSize:         defaultWriteBatchSize,
         Encoding:               defaultEncoding,
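The options change above amounts to one new knob: a LiveTailRefreshSeconds field on namespaceConfig, seeded with the new defaultLiveTailRefreshSeconds (10) in both branches of NewOptions. A minimal sketch of that defaults pattern, using a trimmed hypothetical struct rather than the real one (which also carries table names, batch settings, and encoding):

    package main

    import "fmt"

    // Default mirrored from the diff above.
    const defaultLiveTailRefreshSeconds int = 10

    // namespaceConfig is a trimmed, hypothetical stand-in for the real struct.
    type namespaceConfig struct {
        LiveTailRefreshSeconds int
    }

    func newConfigs() (primary, other namespaceConfig) {
        // Both the primary and the "other" namespaces get the same default,
        // so every namespace polls at the same rate unless overridden later.
        primary = namespaceConfig{LiveTailRefreshSeconds: defaultLiveTailRefreshSeconds}
        other = namespaceConfig{LiveTailRefreshSeconds: defaultLiveTailRefreshSeconds}
        return primary, other
    }

    func main() {
        p, o := newConfigs()
        fmt.Println(p.LiveTailRefreshSeconds, o.LiveTailRefreshSeconds) // 10 10
    }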
@@ -94,6 +94,8 @@ type ClickHouseReader struct {
     promConfigFile string
     promConfig     *config.Config
     alertManager   am.Manager
+
+    liveTailRefreshSeconds int
 }
 
 // NewTraceReader returns a TraceReader for the database
@@ -116,20 +118,21 @@ func NewReader(localDB *sqlx.DB, configFile string) *ClickHouseReader {
     }
 
     return &ClickHouseReader{
         db:                db,
         localDB:           localDB,
         traceDB:           options.primary.TraceDB,
         alertManager:      alertManager,
         operationsTable:   options.primary.OperationsTable,
         indexTable:        options.primary.IndexTable,
         errorTable:        options.primary.ErrorTable,
         durationTable:     options.primary.DurationTable,
         spansTable:        options.primary.SpansTable,
         logsDB:            options.primary.LogsDB,
         logsTable:         options.primary.LogsTable,
         logsAttributeKeys: options.primary.LogsAttributeKeysTable,
         logsResourceKeys:  options.primary.LogsResourceKeysTable,
-        promConfigFile:    configFile,
+        liveTailRefreshSeconds: options.primary.LiveTailRefreshSeconds,
+        promConfigFile:         configFile,
     }
 }
 
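NewReader copies options.primary.LiveTailRefreshSeconds onto the reader as a plain int of seconds, so the tail loop later has to convert it with time.Duration(n) * time.Second. A small sketch of why that multiplication matters (a bare time.Duration(n) would be nanoseconds); the variable names here are illustrative only:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        refreshSeconds := 10 // the int stored on the reader

        // A bare conversion treats the int as nanoseconds.
        wrong := time.Duration(refreshSeconds)

        // Multiplying by time.Second gives the intended interval, matching
        // time.Sleep(time.Duration(r.liveTailRefreshSeconds) * time.Second).
        right := time.Duration(refreshSeconds) * time.Second

        fmt.Println(wrong, right) // 10ns 10s
    }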
@@ -2912,6 +2915,7 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
             zap.S().Debug("closing go routine : " + client.Name)
             return
         default:
+            // get the new 100 logs as anything more older won't make sense
             tmpQuery := fmt.Sprintf("%s where timestamp >='%d'", query, tsStart)
             if filterSql != "" {
                 tmpQuery += fmt.Sprintf(" and %s", filterSql)
@@ -2919,7 +2923,7 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
             if idStart != "" {
                 tmpQuery += fmt.Sprintf(" and id > '%s'", idStart)
             }
-            tmpQuery = fmt.Sprintf("%s order by timestamp asc, id asc limit 1000", tmpQuery)
+            tmpQuery = fmt.Sprintf("%s order by timestamp desc, id desc limit 100", tmpQuery)
             zap.S().Debug(tmpQuery)
             response := []model.GetLogsResponse{}
             err := r.db.Select(ctx, &response, tmpQuery)
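For a concrete picture of the rewritten query, the sketch below re-runs the same fmt.Sprintf chain with hypothetical inputs (the real base query, cursor values, and filter are built earlier in TailLogs and are not part of this diff):

    package main

    import "fmt"

    func main() {
        // Hypothetical base query; the real one is assembled earlier in TailLogs.
        query := "select timestamp, id, body from signoz_logs.logs"
        tsStart := uint64(1660000000000000000)    // nanosecond timestamp cursor
        idStart := "2BsO8zWTzBLc0KVWHUoCGCQOSXk"  // hypothetical id cursor
        filterSql := "severity_text='ERROR'"      // hypothetical filter

        tmpQuery := fmt.Sprintf("%s where timestamp >='%d'", query, tsStart)
        if filterSql != "" {
            tmpQuery += fmt.Sprintf(" and %s", filterSql)
        }
        if idStart != "" {
            tmpQuery += fmt.Sprintf(" and id > '%s'", idStart)
        }
        // After this commit: newest rows first, capped at 100 (was asc / 1000).
        tmpQuery = fmt.Sprintf("%s order by timestamp desc, id desc limit 100", tmpQuery)

        fmt.Println(tmpQuery)
    }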
@@ -2929,7 +2933,7 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
                 return
             }
             len := len(response)
-            for i := 0; i < len; i++ {
+            for i := len - 1; i >= 0; i-- {
                 select {
                 case <-ctx.Done():
                     done := true
@@ -2938,13 +2942,13 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
                     return
                 default:
                     client.Logs <- &response[i]
-                    if i == len-1 {
+                    if i == 0 {
                         tsStart = response[i].Timestamp
                         idStart = response[i].ID
                     }
                 }
             }
-            time.Sleep(10 * time.Second)
+            time.Sleep(time.Duration(r.liveTailRefreshSeconds) * time.Second)
         }
     }
 }
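Taken together, the TailLogs changes poll on the configured interval, fetch only the newest 100 rows in descending order, stream them to the client oldest-first by walking the slice backwards, and advance the (timestamp, id) cursor from index 0, which now holds the newest row. Below is a condensed, self-contained sketch of that loop under stated assumptions: a hypothetical logRow type and fetchNewest function stand in for model.GetLogsResponse and the ClickHouse query, and a plain channel stands in for client.Logs.

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // logRow is a hypothetical stand-in for model.GetLogsResponse.
    type logRow struct {
        Timestamp uint64
        ID        string
        Body      string
    }

    // fetchNewest stands in for r.db.Select running the
    // "order by timestamp desc, id desc limit 100" query: rows come back newest-first.
    func fetchNewest(tsStart uint64, idStart string) []logRow {
        return []logRow{
            {Timestamp: 3, ID: "c", Body: "newest"},
            {Timestamp: 2, ID: "b", Body: "middle"},
            {Timestamp: 1, ID: "a", Body: "oldest"},
        }
    }

    func tail(ctx context.Context, out chan<- logRow, refreshSeconds int) {
        var tsStart uint64
        var idStart string
        for {
            select {
            case <-ctx.Done():
                return
            default:
                rows := fetchNewest(tsStart, idStart)
                // Rows are newest-first, so iterate backwards to deliver oldest-first.
                for i := len(rows) - 1; i >= 0; i-- {
                    out <- rows[i]
                    if i == 0 {
                        // Index 0 is the newest row; remember it as the
                        // cursor for the next poll.
                        tsStart = rows[i].Timestamp
                        idStart = rows[i].ID
                    }
                }
                time.Sleep(time.Duration(refreshSeconds) * time.Second)
            }
        }
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        out := make(chan logRow, 100)
        go tail(ctx, out, 1)

        for i := 0; i < 3; i++ {
            fmt.Println((<-out).Body) // oldest, middle, newest
        }
    }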