mirror of https://git.mirrors.martin98.com/https://github.com/SigNoz/signoz (synced 2025-08-12 16:28:59 +08:00)
live tail fetch only recent 100 logs every 10s
This commit is contained in:
parent 4825ed6e5f
commit 0fe4327877
@@ -29,6 +29,7 @@ const (
 	defaultLogsTable              string        = "logs"
 	defaultLogAttributeKeysTable  string        = "logs_atrribute_keys"
 	defaultLogResourceKeysTable   string        = "logs_resource_keys"
+	defaultLiveTailRefreshSeconds int           = 10
 	defaultWriteBatchDelay        time.Duration = 5 * time.Second
 	defaultWriteBatchSize         int           = 10000
 	defaultEncoding               Encoding      = EncodingJSON
@@ -60,6 +61,7 @@ type namespaceConfig struct {
 	LogsTable              string
 	LogsAttributeKeysTable string
 	LogsResourceKeysTable  string
+	LiveTailRefreshSeconds int
 	WriteBatchDelay        time.Duration
 	WriteBatchSize         int
 	Encoding               Encoding
@@ -123,6 +125,7 @@ func NewOptions(datasource string, primaryNamespace string, otherNamespaces ...s
 		LogsTable:              defaultLogsTable,
 		LogsAttributeKeysTable: defaultLogAttributeKeysTable,
 		LogsResourceKeysTable:  defaultLogResourceKeysTable,
+		LiveTailRefreshSeconds: defaultLiveTailRefreshSeconds,
 		WriteBatchDelay:        defaultWriteBatchDelay,
 		WriteBatchSize:         defaultWriteBatchSize,
 		Encoding:               defaultEncoding,
@@ -144,6 +147,7 @@ func NewOptions(datasource string, primaryNamespace string, otherNamespaces ...s
 		LogsTable:              "",
 		LogsAttributeKeysTable: "",
 		LogsResourceKeysTable:  "",
+		LiveTailRefreshSeconds: defaultLiveTailRefreshSeconds,
 		WriteBatchDelay:        defaultWriteBatchDelay,
 		WriteBatchSize:         defaultWriteBatchSize,
 		Encoding:               defaultEncoding,
@@ -94,6 +94,8 @@ type ClickHouseReader struct {
 	promConfigFile string
 	promConfig     *config.Config
 	alertManager   am.Manager
+
+	liveTailRefreshSeconds int
 }

 // NewTraceReader returns a TraceReader for the database
@@ -129,6 +131,7 @@ func NewReader(localDB *sqlx.DB, configFile string) *ClickHouseReader {
 		logsTable:              options.primary.LogsTable,
 		logsAttributeKeys:      options.primary.LogsAttributeKeysTable,
 		logsResourceKeys:       options.primary.LogsResourceKeysTable,
+		liveTailRefreshSeconds: options.primary.LiveTailRefreshSeconds,
 		promConfigFile:         configFile,
 	}
 }
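Taken together, the hunks so far thread the refresh interval from the defaults down into the reader. A condensed, runnable sketch of that plumbing follows; the real structs carry many more fields, and everything here is stripped down to the one field this commit adds:

package main

import (
	"fmt"
	"time"
)

const defaultLiveTailRefreshSeconds int = 10

// namespaceConfig stands in for the real config struct; only the new field is shown.
type namespaceConfig struct {
	LiveTailRefreshSeconds int
}

// ClickHouseReader is likewise reduced to the field this commit adds.
type ClickHouseReader struct {
	liveTailRefreshSeconds int
}

func NewReader(primary namespaceConfig) *ClickHouseReader {
	return &ClickHouseReader{
		liveTailRefreshSeconds: primary.LiveTailRefreshSeconds,
	}
}

func main() {
	r := NewReader(namespaceConfig{LiveTailRefreshSeconds: defaultLiveTailRefreshSeconds})
	// int seconds -> time.Duration, the conversion TailLogs performs below.
	fmt.Println(time.Duration(r.liveTailRefreshSeconds) * time.Second) // 10s
}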
@@ -2912,6 +2915,7 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
 			zap.S().Debug("closing go routine : " + client.Name)
 			return
 		default:
+			// fetch only the newest 100 logs; anything older won't make sense for a live tail
 			tmpQuery := fmt.Sprintf("%s where timestamp >='%d'", query, tsStart)
 			if filterSql != "" {
 				tmpQuery += fmt.Sprintf(" and %s", filterSql)
@@ -2919,7 +2923,7 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
 			if idStart != "" {
 				tmpQuery += fmt.Sprintf(" and id > '%s'", idStart)
 			}
-			tmpQuery = fmt.Sprintf("%s order by timestamp asc, id asc limit 1000", tmpQuery)
+			tmpQuery = fmt.Sprintf("%s order by timestamp desc, id desc limit 100", tmpQuery)
 			zap.S().Debug(tmpQuery)
 			response := []model.GetLogsResponse{}
 			err := r.db.Select(ctx, &response, tmpQuery)
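With the ordering flipped to descending and the limit cut to 100, each poll now returns only the newest 100 matching rows instead of up to 1000 oldest-first. A standalone sketch of how the query string is assembled; the base query, filter, and cursor values are hypothetical placeholders, not taken from this diff:

package main

import "fmt"

func main() {
	// Placeholders: the real base query and filter come from the caller.
	query := "select timestamp, id, body from logs"
	filterSql := "severity_text = 'ERROR'"
	tsStart, idStart := int64(1660000000), "log-42"

	tmpQuery := fmt.Sprintf("%s where timestamp >='%d'", query, tsStart)
	if filterSql != "" {
		tmpQuery += fmt.Sprintf(" and %s", filterSql)
	}
	if idStart != "" {
		tmpQuery += fmt.Sprintf(" and id > '%s'", idStart)
	}
	// Newest rows first, capped at 100: the change this hunk makes.
	tmpQuery = fmt.Sprintf("%s order by timestamp desc, id desc limit 100", tmpQuery)
	fmt.Println(tmpQuery)
}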
@@ -2929,7 +2933,7 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
 				return
 			}
 			len := len(response)
-			for i := 0; i < len; i++ {
+			for i := len - 1; i >= 0; i-- {
 				select {
 				case <-ctx.Done():
 					done := true
@@ -2938,13 +2942,13 @@ func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailC
 					return
 				default:
 					client.Logs <- &response[i]
-					if i == len-1 {
+					if i == 0 {
 						tsStart = response[i].Timestamp
 						idStart = response[i].ID
 					}
 				}
 			}
-			time.Sleep(10 * time.Second)
+			time.Sleep(time.Duration(r.liveTailRefreshSeconds) * time.Second)
 		}
 	}
 }
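Because the rows now arrive newest-first, the loop walks the slice backwards so the client still receives logs in chronological order, and index 0 (the newest row) seeds the cursor for the next poll. A minimal sketch of that iteration, with a stand-in row type replacing model.GetLogsResponse and a print replacing the client channel:

package main

import "fmt"

// logRow stands in for model.GetLogsResponse.
type logRow struct {
	Timestamp uint64
	ID        string
}

func main() {
	// Rows as the query returns them: timestamp desc, newest first.
	response := []logRow{
		{Timestamp: 30, ID: "c"},
		{Timestamp: 20, ID: "b"},
		{Timestamp: 10, ID: "a"},
	}

	var tsStart uint64
	var idStart string

	// Walk backwards: emit oldest first, so the tail stays chronological.
	for i := len(response) - 1; i >= 0; i-- {
		fmt.Println("send:", response[i].ID) // prints a, then b, then c
		if i == 0 {                          // the newest row becomes the next cursor
			tsStart = response[i].Timestamp
			idStart = response[i].ID
		}
	}
	fmt.Println("next cursor:", tsStart, idStart) // next cursor: 30 c
}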