diff --git a/.github/ISSUE_TEMPLATE/request_dashboard.md b/.github/ISSUE_TEMPLATE/request_dashboard.md new file mode 100644 index 0000000000..7972f40625 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/request_dashboard.md @@ -0,0 +1,58 @@ +--- +name: Request Dashboard +about: Request a new dashboard for the SigNoz Dashboards repository +title: '' +labels: 'dashboard-template' +assignees: '' + +--- + +## 📝 Dashboard Request Template + +*Use this template to request a new dashboard for the SigNoz Dashboards repository. Please provide as much detail as possible to help us understand your needs.* + +--- + +### 1. Dashboard Name + +Name of the requested dashboard (e.g., MySQL Monitoring Dashboard): + +--- + +### 2. Expected Dashboard Sections and Panels + +#### Section Name + +Brief description of the section (e.g., "Resource usage metrics for MySQL database"). + +#### Panel Name + +Panel description (e.g., "Value-type panels displaying current CPU usage, memory usage, etc."). + +- **Example:** + - **Section**: Resource Metrics + - **Panel**: CPU Usage - Displays the current CPU usage across all database instances. + - **Panel**: Memory Usage - Displays the total memory used by the MySQL process. + +(Repeat this format for additional sections and panels) + +--- + +### 3. Expected Variables + +List any variables you expect to use in the dashboard (e.g., `deployment.environment`, `hostname`, etc.). + +--- + +### 4. Additional Comments or Requirements + +Any additional details or special requirements for the dashboard? + +--- + +### 📋 Notes + +Please review the [CONTRIBUTING.md](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md) for guidelines on dashboard structure, naming conventions, and how to submit a pull request. + +--- +Thank you for your request! We will review it and provide feedback or guidance as necessary. 
diff --git a/ee/query-service/anomaly/daily.go b/ee/query-service/anomaly/daily.go new file mode 100644 index 0000000000..bbafe1618e --- /dev/null +++ b/ee/query-service/anomaly/daily.go @@ -0,0 +1,44 @@ +package anomaly + +import ( + "context" + + querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2" + "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" +) + +type DailyProvider struct { + BaseSeasonalProvider +} + +var _ BaseProvider = (*DailyProvider)(nil) + +func (dp *DailyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider { + return &dp.BaseSeasonalProvider +} + +// NewDailyProvider uses the same generic option type +func NewDailyProvider(opts ...GenericProviderOption[*DailyProvider]) *DailyProvider { + dp := &DailyProvider{ + BaseSeasonalProvider: BaseSeasonalProvider{}, + } + + for _, opt := range opts { + opt(dp) + } + + dp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{ + Reader: dp.reader, + Cache: dp.cache, + KeyGenerator: queryBuilder.NewKeyGenerator(), + FluxInterval: dp.fluxInterval, + FeatureLookup: dp.ff, + }) + + return dp +} + +func (p *DailyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) { + req.Seasonality = SeasonalityDaily + return p.getAnomalies(ctx, req) +} diff --git a/ee/query-service/anomaly/hourly.go b/ee/query-service/anomaly/hourly.go new file mode 100644 index 0000000000..1ee08655f0 --- /dev/null +++ b/ee/query-service/anomaly/hourly.go @@ -0,0 +1,44 @@ +package anomaly + +import ( + "context" + + querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2" + "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" +) + +type HourlyProvider struct { + BaseSeasonalProvider +} + +var _ BaseProvider = (*HourlyProvider)(nil) + +func (hp *HourlyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider { + return &hp.BaseSeasonalProvider +} + +// NewHourlyProvider now uses the generic option type +func NewHourlyProvider(opts ...GenericProviderOption[*HourlyProvider]) *HourlyProvider { + hp := &HourlyProvider{ + BaseSeasonalProvider: BaseSeasonalProvider{}, + } + + for _, opt := range opts { + opt(hp) + } + + hp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{ + Reader: hp.reader, + Cache: hp.cache, + KeyGenerator: queryBuilder.NewKeyGenerator(), + FluxInterval: hp.fluxInterval, + FeatureLookup: hp.ff, + }) + + return hp +} + +func (p *HourlyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) { + req.Seasonality = SeasonalityHourly + return p.getAnomalies(ctx, req) +} diff --git a/ee/query-service/anomaly/params.go b/ee/query-service/anomaly/params.go new file mode 100644 index 0000000000..d39b2fa80f --- /dev/null +++ b/ee/query-service/anomaly/params.go @@ -0,0 +1,244 @@ +package anomaly + +import ( + "math" + "time" + + "go.signoz.io/signoz/pkg/query-service/common" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" +) + +type Seasonality string + +const ( + SeasonalityHourly Seasonality = "hourly" + SeasonalityDaily Seasonality = "daily" + SeasonalityWeekly Seasonality = "weekly" +) + +var ( + oneWeekOffset = 24 * 7 * time.Hour.Milliseconds() + oneDayOffset = 24 * time.Hour.Milliseconds() + oneHourOffset = time.Hour.Milliseconds() + fiveMinOffset = 5 * time.Minute.Milliseconds() +) + +func (s Seasonality) IsValid() bool { + switch s { + case SeasonalityHourly, SeasonalityDaily, SeasonalityWeekly: + return true + default: + return false + } +} + +type GetAnomaliesRequest struct { + Params *v3.QueryRangeParamsV3 + 
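+	// Seasonality selects the seasonal pattern (hourly, daily or weekly) that
+	// determines how far back the comparison windows built in
+	// prepareAnomalyQueryParams reach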
Seasonality Seasonality
+}
+
+type GetAnomaliesResponse struct {
+	Results []*v3.Result
+}
+
+// anomalyQueryParams holds the parameters for anomaly detection
+// prediction = avg(past_period_query) + avg(current_season_query) - mean(past_season_query, past2_season_query, past3_season_query)
+//
+//	(rounded value for past period) + (seasonal growth)
+//
+// score = abs(value - prediction) / stddev(current_season_query)
+type anomalyQueryParams struct {
+	// CurrentPeriodQuery is the query range params for the period the user is looking at, i.e. the eval window
+	// Example: (now-5m, now), (now-30m, now), (now-1h, now)
+	// The results obtained from this query are compared with the predicted values
+	// to detect anomalies
+	CurrentPeriodQuery *v3.QueryRangeParamsV3
+	// PastPeriodQuery is the query range params for the same window one seasonal period earlier
+	// Example: For weekly seasonality, (now-1w-5m, now-1w)
+	//        : For daily seasonality, (now-1d-5m, now-1d)
+	//        : For hourly seasonality, (now-1h-5m, now-1h)
+	PastPeriodQuery *v3.QueryRangeParamsV3
+	// CurrentSeasonQuery is the query range params for the current seasonal period
+	// Example: For weekly seasonality, this is the query range params for the (now-1w-5m, now)
+	//        : For daily seasonality, this is the query range params for the (now-1d-5m, now)
+	//        : For hourly seasonality, this is the query range params for the (now-1h-5m, now)
+	CurrentSeasonQuery *v3.QueryRangeParamsV3
+	// PastSeasonQuery is the query range params for the seasonal period one season before the current one
+	// Example: For weekly seasonality, this is the query range params for the (now-2w-5m, now-1w)
+	//        : For daily seasonality, this is the query range params for the (now-2d-5m, now-1d)
+	//        : For hourly seasonality, this is the query range params for the (now-2h-5m, now-1h)
+	PastSeasonQuery *v3.QueryRangeParamsV3
+
+	// Past2SeasonQuery is the query range params for the seasonal period two seasons before the current one
+	// Example: For weekly seasonality, this is the query range params for the (now-3w-5m, now-2w)
+	//        : For daily seasonality, this is the query range params for the (now-3d-5m, now-2d)
+	//        : For hourly seasonality, this is the query range params for the (now-3h-5m, now-2h)
+	Past2SeasonQuery *v3.QueryRangeParamsV3
+	// Past3SeasonQuery is the query range params for the seasonal period three seasons before the current one
+	// Example: For weekly seasonality, this is the query range params for the (now-4w-5m, now-3w)
+	//        : For daily seasonality, this is the query range params for the (now-4d-5m, now-3d)
+	//        : For hourly seasonality, this is the query range params for the (now-4h-5m, now-3h)
+	Past3SeasonQuery *v3.QueryRangeParamsV3
+}
+
+func updateStepInterval(req *v3.QueryRangeParamsV3) {
+	start := req.Start
+	end := req.End
+
+	req.Step = int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60))
+	for _, q := range req.CompositeQuery.BuilderQueries {
+		// If the step interval is less than the minimum allowed step interval, raise it to the minimum
+		if minStep := common.MinAllowedStepInterval(start, end); q.StepInterval < minStep {
+			q.StepInterval = minStep
+		}
+	}
+}
+
+func prepareAnomalyQueryParams(req *v3.QueryRangeParamsV3, seasonality Seasonality) *anomalyQueryParams {
+	start := req.Start
+	end := req.End
+
+	currentPeriodQuery := &v3.QueryRangeParamsV3{
+		Start:          start,
+		End:            end,
+		CompositeQuery: req.CompositeQuery.Clone(),
+		Variables:      make(map[string]interface{}, 0),
+		NoCache:        false,
+	}
+	updateStepInterval(currentPeriodQuery)
+
+	var
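+	// e.g. (hypothetical numbers) with SeasonalityWeekly and an eval window of
+	// (now-30m, now), the past period computed below works out to
+	// (now-1w-35m, now-1w): the same window one season back, padded by fiveMinOffset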
pastPeriodStart, pastPeriodEnd int64 + + switch seasonality { + // for one week period, we fetch the data from the past week with 5 min offset + case SeasonalityWeekly: + pastPeriodStart = start - oneWeekOffset - fiveMinOffset + pastPeriodEnd = end - oneWeekOffset + // for one day period, we fetch the data from the past day with 5 min offset + case SeasonalityDaily: + pastPeriodStart = start - oneDayOffset - fiveMinOffset + pastPeriodEnd = end - oneDayOffset + // for one hour period, we fetch the data from the past hour with 5 min offset + case SeasonalityHourly: + pastPeriodStart = start - oneHourOffset - fiveMinOffset + pastPeriodEnd = end - oneHourOffset + } + + pastPeriodQuery := &v3.QueryRangeParamsV3{ + Start: pastPeriodStart, + End: pastPeriodEnd, + CompositeQuery: req.CompositeQuery.Clone(), + Variables: make(map[string]interface{}, 0), + NoCache: false, + } + updateStepInterval(pastPeriodQuery) + + // seasonality growth trend + var currentGrowthPeriodStart, currentGrowthPeriodEnd int64 + switch seasonality { + case SeasonalityWeekly: + currentGrowthPeriodStart = start - oneWeekOffset + currentGrowthPeriodEnd = end + case SeasonalityDaily: + currentGrowthPeriodStart = start - oneDayOffset + currentGrowthPeriodEnd = end + case SeasonalityHourly: + currentGrowthPeriodStart = start - oneHourOffset + currentGrowthPeriodEnd = end + } + + currentGrowthQuery := &v3.QueryRangeParamsV3{ + Start: currentGrowthPeriodStart, + End: currentGrowthPeriodEnd, + CompositeQuery: req.CompositeQuery.Clone(), + Variables: make(map[string]interface{}, 0), + NoCache: false, + } + updateStepInterval(currentGrowthQuery) + + var pastGrowthPeriodStart, pastGrowthPeriodEnd int64 + switch seasonality { + case SeasonalityWeekly: + pastGrowthPeriodStart = start - 2*oneWeekOffset + pastGrowthPeriodEnd = start - 1*oneWeekOffset + case SeasonalityDaily: + pastGrowthPeriodStart = start - 2*oneDayOffset + pastGrowthPeriodEnd = start - 1*oneDayOffset + case SeasonalityHourly: + pastGrowthPeriodStart = start - 2*oneHourOffset + pastGrowthPeriodEnd = start - 1*oneHourOffset + } + + pastGrowthQuery := &v3.QueryRangeParamsV3{ + Start: pastGrowthPeriodStart, + End: pastGrowthPeriodEnd, + CompositeQuery: req.CompositeQuery.Clone(), + Variables: make(map[string]interface{}, 0), + NoCache: false, + } + updateStepInterval(pastGrowthQuery) + + var past2GrowthPeriodStart, past2GrowthPeriodEnd int64 + switch seasonality { + case SeasonalityWeekly: + past2GrowthPeriodStart = start - 3*oneWeekOffset + past2GrowthPeriodEnd = start - 2*oneWeekOffset + case SeasonalityDaily: + past2GrowthPeriodStart = start - 3*oneDayOffset + past2GrowthPeriodEnd = start - 2*oneDayOffset + case SeasonalityHourly: + past2GrowthPeriodStart = start - 3*oneHourOffset + past2GrowthPeriodEnd = start - 2*oneHourOffset + } + + past2GrowthQuery := &v3.QueryRangeParamsV3{ + Start: past2GrowthPeriodStart, + End: past2GrowthPeriodEnd, + CompositeQuery: req.CompositeQuery.Clone(), + Variables: make(map[string]interface{}, 0), + NoCache: false, + } + updateStepInterval(past2GrowthQuery) + + var past3GrowthPeriodStart, past3GrowthPeriodEnd int64 + switch seasonality { + case SeasonalityWeekly: + past3GrowthPeriodStart = start - 4*oneWeekOffset + past3GrowthPeriodEnd = start - 3*oneWeekOffset + case SeasonalityDaily: + past3GrowthPeriodStart = start - 4*oneDayOffset + past3GrowthPeriodEnd = start - 3*oneDayOffset + case SeasonalityHourly: + past3GrowthPeriodStart = start - 4*oneHourOffset + past3GrowthPeriodEnd = start - 3*oneHourOffset + } + + past3GrowthQuery := 
&v3.QueryRangeParamsV3{ + Start: past3GrowthPeriodStart, + End: past3GrowthPeriodEnd, + CompositeQuery: req.CompositeQuery.Clone(), + Variables: make(map[string]interface{}, 0), + NoCache: false, + } + updateStepInterval(past3GrowthQuery) + + return &anomalyQueryParams{ + CurrentPeriodQuery: currentPeriodQuery, + PastPeriodQuery: pastPeriodQuery, + CurrentSeasonQuery: currentGrowthQuery, + PastSeasonQuery: pastGrowthQuery, + Past2SeasonQuery: past2GrowthQuery, + Past3SeasonQuery: past3GrowthQuery, + } +} + +type anomalyQueryResults struct { + CurrentPeriodResults []*v3.Result + PastPeriodResults []*v3.Result + CurrentSeasonResults []*v3.Result + PastSeasonResults []*v3.Result + Past2SeasonResults []*v3.Result + Past3SeasonResults []*v3.Result +} diff --git a/ee/query-service/anomaly/provider.go b/ee/query-service/anomaly/provider.go new file mode 100644 index 0000000000..6e0f7a8cd0 --- /dev/null +++ b/ee/query-service/anomaly/provider.go @@ -0,0 +1,9 @@ +package anomaly + +import ( + "context" +) + +type Provider interface { + GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) +} diff --git a/ee/query-service/anomaly/seasonal.go b/ee/query-service/anomaly/seasonal.go new file mode 100644 index 0000000000..485ab7f460 --- /dev/null +++ b/ee/query-service/anomaly/seasonal.go @@ -0,0 +1,464 @@ +package anomaly + +import ( + "context" + "math" + "time" + + "go.signoz.io/signoz/pkg/query-service/cache" + "go.signoz.io/signoz/pkg/query-service/interfaces" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/postprocess" + "go.signoz.io/signoz/pkg/query-service/utils/labels" + "go.uber.org/zap" +) + +var ( + // TODO(srikanthccv): make this configurable? + movingAvgWindowSize = 7 +) + +// BaseProvider is an interface that includes common methods for all provider types +type BaseProvider interface { + GetBaseSeasonalProvider() *BaseSeasonalProvider +} + +// GenericProviderOption is a generic type for provider options +type GenericProviderOption[T BaseProvider] func(T) + +func WithCache[T BaseProvider](cache cache.Cache) GenericProviderOption[T] { + return func(p T) { + p.GetBaseSeasonalProvider().cache = cache + } +} + +func WithKeyGenerator[T BaseProvider](keyGenerator cache.KeyGenerator) GenericProviderOption[T] { + return func(p T) { + p.GetBaseSeasonalProvider().keyGenerator = keyGenerator + } +} + +func WithFeatureLookup[T BaseProvider](ff interfaces.FeatureLookup) GenericProviderOption[T] { + return func(p T) { + p.GetBaseSeasonalProvider().ff = ff + } +} + +func WithReader[T BaseProvider](reader interfaces.Reader) GenericProviderOption[T] { + return func(p T) { + p.GetBaseSeasonalProvider().reader = reader + } +} + +type BaseSeasonalProvider struct { + querierV2 interfaces.Querier + reader interfaces.Reader + fluxInterval time.Duration + cache cache.Cache + keyGenerator cache.KeyGenerator + ff interfaces.FeatureLookup +} + +func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomalyQueryParams { + if !req.Seasonality.IsValid() { + req.Seasonality = SeasonalityDaily + } + return prepareAnomalyQueryParams(req.Params, req.Seasonality) +} + +func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQueryParams) (*anomalyQueryResults, error) { + currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery) + if err != nil { + return nil, err + } + + currentPeriodResults, err = postprocess.PostProcessResult(currentPeriodResults, 
params.CurrentPeriodQuery)
+	if err != nil {
+		return nil, err
+	}
+
+	pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery)
+	if err != nil {
+		return nil, err
+	}
+
+	pastPeriodResults, err = postprocess.PostProcessResult(pastPeriodResults, params.PastPeriodQuery)
+	if err != nil {
+		return nil, err
+	}
+
+	currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery)
+	if err != nil {
+		return nil, err
+	}
+
+	currentSeasonResults, err = postprocess.PostProcessResult(currentSeasonResults, params.CurrentSeasonQuery)
+	if err != nil {
+		return nil, err
+	}
+
+	pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery)
+	if err != nil {
+		return nil, err
+	}
+
+	pastSeasonResults, err = postprocess.PostProcessResult(pastSeasonResults, params.PastSeasonQuery)
+	if err != nil {
+		return nil, err
+	}
+
+	past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past2SeasonQuery)
+	if err != nil {
+		return nil, err
+	}
+
+	past2SeasonResults, err = postprocess.PostProcessResult(past2SeasonResults, params.Past2SeasonQuery)
+	if err != nil {
+		return nil, err
+	}
+
+	past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past3SeasonQuery)
+	if err != nil {
+		return nil, err
+	}
+
+	past3SeasonResults, err = postprocess.PostProcessResult(past3SeasonResults, params.Past3SeasonQuery)
+	if err != nil {
+		return nil, err
+	}
+
+	return &anomalyQueryResults{
+		CurrentPeriodResults: currentPeriodResults,
+		PastPeriodResults:    pastPeriodResults,
+		CurrentSeasonResults: currentSeasonResults,
+		PastSeasonResults:    pastSeasonResults,
+		Past2SeasonResults:   past2SeasonResults,
+		Past3SeasonResults:   past3SeasonResults,
+	}, nil
+}
+
+// getMatchingSeries returns the series in the query result whose labels
+// match the given series
+func (p *BaseSeasonalProvider) getMatchingSeries(queryResult *v3.Result, series *v3.Series) *v3.Series {
+	if queryResult == nil || len(queryResult.Series) == 0 {
+		return nil
+	}
+
+	for _, curr := range queryResult.Series {
+		currLabels := labels.FromMap(curr.Labels)
+		seriesLabels := labels.FromMap(series.Labels)
+		if currLabels.Hash() == seriesLabels.Hash() {
+			return curr
+		}
+	}
+	return nil
+}
+
+func (p *BaseSeasonalProvider) getAvg(series *v3.Series) float64 {
+	if series == nil || len(series.Points) == 0 {
+		return 0
+	}
+	var sum float64
+	for _, smpl := range series.Points {
+		sum += smpl.Value
+	}
+	return sum / float64(len(series.Points))
+}
+
+func (p *BaseSeasonalProvider) getStdDev(series *v3.Series) float64 {
+	if series == nil || len(series.Points) == 0 {
+		return 0
+	}
+	avg := p.getAvg(series)
+	var sum float64
+	for _, smpl := range series.Points {
+		sum += math.Pow(smpl.Value-avg, 2)
+	}
+	return math.Sqrt(sum / float64(len(series.Points)))
+}
+
+// getMovingAvg gets the moving average for the given series
+// for the given window size and start index
+func (p *BaseSeasonalProvider) getMovingAvg(series *v3.Series, movingAvgWindowSize, startIdx int) float64 {
+	if series == nil || len(series.Points) == 0 {
+		return 0
+	}
+	if startIdx >= len(series.Points)-movingAvgWindowSize {
+		startIdx = len(series.Points) - movingAvgWindowSize
+	}
+	if startIdx < 0 {
+		// a series shorter than the window would make the slice index below
+		// negative and panic; start from the beginning of the series instead
+		startIdx = 0
+	}
+	var sum float64
+	points := series.Points[startIdx:]
+	for i := 0; i < movingAvgWindowSize && i < len(points); i++ {
+		sum += points[i].Value
+	}
+	avg := sum / float64(movingAvgWindowSize)
+	return avg
+}
+
+func (p *BaseSeasonalProvider) getMean(floats ...float64) float64 {
+	if len(floats) == 0 {
+		return 0
+	}
+	var sum float64
+	for _, f :=
range floats { + sum += f + } + return sum / float64(len(floats)) +} + +func (p *BaseSeasonalProvider) getPredictedSeries( + series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, +) *v3.Series { + predictedSeries := &v3.Series{ + Labels: series.Labels, + LabelsArray: series.LabelsArray, + Points: []v3.Point{}, + } + + // for each point in the series, get the predicted value + // the predicted value is the moving average (with window size = 7) of the previous period series + // plus the average of the current season series + // minus the mean of the past season series, past2 season series and past3 season series + for idx, curr := range series.Points { + predictedValue := + p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) + + p.getAvg(currentSeasonSeries) - + p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries)) + + if predictedValue < 0 { + predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) + } + + zap.L().Info("predictedSeries", + zap.Float64("movingAvg", p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)), + zap.Float64("avg", p.getAvg(currentSeasonSeries)), + zap.Float64("mean", p.getMean(p.getAvg(pastSeasonSeries), p.getAvg(past2SeasonSeries), p.getAvg(past3SeasonSeries))), + zap.Any("labels", series.Labels), + zap.Float64("predictedValue", predictedValue), + ) + predictedSeries.Points = append(predictedSeries.Points, v3.Point{ + Timestamp: curr.Timestamp, + Value: predictedValue, + }) + } + + return predictedSeries +} + +// getBounds gets the upper and lower bounds for the given series +// for the given z score threshold +// moving avg of the previous period series + z score threshold * std dev of the series +// moving avg of the previous period series - z score threshold * std dev of the series +func (p *BaseSeasonalProvider) getBounds( + series, prevSeries, _, _, _, _ *v3.Series, + zScoreThreshold float64, +) (*v3.Series, *v3.Series) { + upperBoundSeries := &v3.Series{ + Labels: series.Labels, + LabelsArray: series.LabelsArray, + Points: []v3.Point{}, + } + + lowerBoundSeries := &v3.Series{ + Labels: series.Labels, + LabelsArray: series.LabelsArray, + Points: []v3.Point{}, + } + + for idx, curr := range series.Points { + upperBound := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series) + lowerBound := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series) + upperBoundSeries.Points = append(upperBoundSeries.Points, v3.Point{ + Timestamp: curr.Timestamp, + Value: upperBound, + }) + lowerBoundSeries.Points = append(lowerBoundSeries.Points, v3.Point{ + Timestamp: curr.Timestamp, + Value: math.Max(lowerBound, 0), + }) + } + + return upperBoundSeries, lowerBoundSeries +} + +// getExpectedValue gets the expected value for the given series +// for the given index +// prevSeriesAvg + currentSeasonSeriesAvg - mean of past season series, past2 season series and past3 season series +func (p *BaseSeasonalProvider) getExpectedValue( + _, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, idx int, +) float64 { + prevSeriesAvg := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) + currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries) + pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries) + past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries) + past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries) + return prevSeriesAvg + currentSeasonSeriesAvg - 
p.getMean(pastSeasonSeriesAvg, past2SeasonSeriesAvg, past3SeasonSeriesAvg)
+}
+
+// getScore gets the anomaly score for the given series
+// at the given index
+// score = (value - expectedValue) / std dev of the current season series
+func (p *BaseSeasonalProvider) getScore(
+	series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series, value float64, idx int,
+) float64 {
+	expectedValue := p.getExpectedValue(series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries, idx)
+	return (value - expectedValue) / p.getStdDev(currentSeasonSeries)
+}
+
+// getAnomalyScores computes the anomaly score for every point in the given
+// series, comparing each value against its expected value
+func (p *BaseSeasonalProvider) getAnomalyScores(
+	series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries *v3.Series,
+) *v3.Series {
+	anomalyScoreSeries := &v3.Series{
+		Labels:      series.Labels,
+		LabelsArray: series.LabelsArray,
+		Points:      []v3.Point{},
+	}
+
+	for idx, curr := range series.Points {
+		anomalyScore := p.getScore(series, prevSeries, currentSeasonSeries, pastSeasonSeries, past2SeasonSeries, past3SeasonSeries, curr.Value, idx)
+		anomalyScoreSeries.Points = append(anomalyScoreSeries.Points, v3.Point{
+			Timestamp: curr.Timestamp,
+			Value:     anomalyScore,
+		})
+	}
+
+	return anomalyScoreSeries
+}
+
+func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) {
+	anomalyParams := p.getQueryParams(req)
+	anomalyQueryResults, err := p.getResults(ctx, anomalyParams)
+	if err != nil {
+		return nil, err
+	}
+
+	currentPeriodResultsMap := make(map[string]*v3.Result)
+	for _, result := range anomalyQueryResults.CurrentPeriodResults {
+		currentPeriodResultsMap[result.QueryName] = result
+	}
+
+	pastPeriodResultsMap := make(map[string]*v3.Result)
+	for _, result := range anomalyQueryResults.PastPeriodResults {
+		pastPeriodResultsMap[result.QueryName] = result
+	}
+
+	currentSeasonResultsMap := make(map[string]*v3.Result)
+	for _, result := range anomalyQueryResults.CurrentSeasonResults {
+		currentSeasonResultsMap[result.QueryName] = result
+	}
+
+	pastSeasonResultsMap := make(map[string]*v3.Result)
+	for _, result := range anomalyQueryResults.PastSeasonResults {
+		pastSeasonResultsMap[result.QueryName] = result
+	}
+
+	past2SeasonResultsMap := make(map[string]*v3.Result)
+	for _, result := range anomalyQueryResults.Past2SeasonResults {
+		past2SeasonResultsMap[result.QueryName] = result
+	}
+
+	past3SeasonResultsMap := make(map[string]*v3.Result)
+	for _, result := range anomalyQueryResults.Past3SeasonResults {
+		past3SeasonResultsMap[result.QueryName] = result
+	}
+
+	for _, result := range currentPeriodResultsMap {
+		funcs := req.Params.CompositeQuery.BuilderQueries[result.QueryName].Functions
+
+		var zScoreThreshold float64
+		for _, f := range funcs {
+			if f.Name == v3.FunctionNameAnomaly {
+				value, ok := f.NamedArgs["z_score_threshold"]
+				if ok {
+					zScoreThreshold = value.(float64)
+				} else {
+					zScoreThreshold = 3
+				}
+				break
+			}
+		}
+
+		pastPeriodResult, ok := pastPeriodResultsMap[result.QueryName]
+		if !ok {
+			continue
+		}
+		currentSeasonResult, ok := currentSeasonResultsMap[result.QueryName]
+		if !ok {
+			continue
+		}
+		pastSeasonResult, ok := pastSeasonResultsMap[result.QueryName]
+		if !ok {
+			continue
+		}
+		past2SeasonResult, ok := past2SeasonResultsMap[result.QueryName]
+		if !ok {
+			continue
+		}
+		past3SeasonResult, ok := past3SeasonResultsMap[result.QueryName]
+		if !ok
{ + continue + } + + for _, series := range result.Series { + stdDev := p.getStdDev(series) + zap.L().Info("stdDev", zap.Float64("stdDev", stdDev), zap.Any("labels", series.Labels)) + + pastPeriodSeries := p.getMatchingSeries(pastPeriodResult, series) + currentSeasonSeries := p.getMatchingSeries(currentSeasonResult, series) + pastSeasonSeries := p.getMatchingSeries(pastSeasonResult, series) + past2SeasonSeries := p.getMatchingSeries(past2SeasonResult, series) + past3SeasonSeries := p.getMatchingSeries(past3SeasonResult, series) + + prevSeriesAvg := p.getAvg(pastPeriodSeries) + currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries) + pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries) + past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries) + past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries) + zap.L().Info("getAvg", zap.Float64("prevSeriesAvg", prevSeriesAvg), zap.Float64("currentSeasonSeriesAvg", currentSeasonSeriesAvg), zap.Float64("pastSeasonSeriesAvg", pastSeasonSeriesAvg), zap.Float64("past2SeasonSeriesAvg", past2SeasonSeriesAvg), zap.Float64("past3SeasonSeriesAvg", past3SeasonSeriesAvg), zap.Any("labels", series.Labels)) + + predictedSeries := p.getPredictedSeries( + series, + pastPeriodSeries, + currentSeasonSeries, + pastSeasonSeries, + past2SeasonSeries, + past3SeasonSeries, + ) + result.PredictedSeries = append(result.PredictedSeries, predictedSeries) + + upperBoundSeries, lowerBoundSeries := p.getBounds( + series, + pastPeriodSeries, + currentSeasonSeries, + pastSeasonSeries, + past2SeasonSeries, + past3SeasonSeries, + zScoreThreshold, + ) + result.UpperBoundSeries = append(result.UpperBoundSeries, upperBoundSeries) + result.LowerBoundSeries = append(result.LowerBoundSeries, lowerBoundSeries) + + anomalyScoreSeries := p.getAnomalyScores( + series, + pastPeriodSeries, + currentSeasonSeries, + pastSeasonSeries, + past2SeasonSeries, + past3SeasonSeries, + ) + result.AnomalyScores = append(result.AnomalyScores, anomalyScoreSeries) + } + } + + results := make([]*v3.Result, 0, len(currentPeriodResultsMap)) + for _, result := range currentPeriodResultsMap { + results = append(results, result) + } + + return &GetAnomaliesResponse{ + Results: results, + }, nil +} diff --git a/ee/query-service/anomaly/weekly.go b/ee/query-service/anomaly/weekly.go new file mode 100644 index 0000000000..407e7e6440 --- /dev/null +++ b/ee/query-service/anomaly/weekly.go @@ -0,0 +1,43 @@ +package anomaly + +import ( + "context" + + querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2" + "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" +) + +type WeeklyProvider struct { + BaseSeasonalProvider +} + +var _ BaseProvider = (*WeeklyProvider)(nil) + +func (wp *WeeklyProvider) GetBaseSeasonalProvider() *BaseSeasonalProvider { + return &wp.BaseSeasonalProvider +} + +func NewWeeklyProvider(opts ...GenericProviderOption[*WeeklyProvider]) *WeeklyProvider { + wp := &WeeklyProvider{ + BaseSeasonalProvider: BaseSeasonalProvider{}, + } + + for _, opt := range opts { + opt(wp) + } + + wp.querierV2 = querierV2.NewQuerier(querierV2.QuerierOptions{ + Reader: wp.reader, + Cache: wp.cache, + KeyGenerator: queryBuilder.NewKeyGenerator(), + FluxInterval: wp.fluxInterval, + FeatureLookup: wp.ff, + }) + + return wp +} + +func (p *WeeklyProvider) GetAnomalies(ctx context.Context, req *GetAnomaliesRequest) (*GetAnomaliesResponse, error) { + req.Seasonality = SeasonalityWeekly + return p.getAnomalies(ctx, req) +} diff --git a/ee/query-service/app/api/api.go b/ee/query-service/app/api/api.go index bb36fdf479..2e2eb8ded5 
100644 --- a/ee/query-service/app/api/api.go +++ b/ee/query-service/app/api/api.go @@ -38,8 +38,7 @@ type APIHandlerOptions struct { Cache cache.Cache Gateway *httputil.ReverseProxy // Querier Influx Interval - FluxInterval time.Duration - + FluxInterval time.Duration UseLogsNewSchema bool } diff --git a/ee/query-service/app/db/metrics.go b/ee/query-service/app/db/metrics.go deleted file mode 100644 index 0cc8a55c32..0000000000 --- a/ee/query-service/app/db/metrics.go +++ /dev/null @@ -1,401 +0,0 @@ -package db - -import ( - "context" - "crypto/md5" - "encoding/json" - "fmt" - "reflect" - "regexp" - "sort" - "strings" - "time" - - "go.signoz.io/signoz/ee/query-service/model" - baseconst "go.signoz.io/signoz/pkg/query-service/constants" - basemodel "go.signoz.io/signoz/pkg/query-service/model" - "go.signoz.io/signoz/pkg/query-service/utils" - "go.uber.org/zap" -) - -// GetMetricResultEE runs the query and returns list of time series -func (r *ClickhouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*basemodel.Series, string, error) { - - defer utils.Elapsed("GetMetricResult", nil)() - zap.L().Info("Executing metric result query: ", zap.String("query", query)) - - var hash string - // If getSubTreeSpans function is used in the clickhouse query - if strings.Contains(query, "getSubTreeSpans(") { - var err error - query, hash, err = r.getSubTreeSpansCustomFunction(ctx, query, hash) - if err == fmt.Errorf("no spans found for the given query") { - return nil, "", nil - } - if err != nil { - return nil, "", err - } - } - - rows, err := r.conn.Query(ctx, query) - if err != nil { - zap.L().Error("Error in processing query", zap.Error(err)) - return nil, "", fmt.Errorf("error in processing query") - } - - var ( - columnTypes = rows.ColumnTypes() - columnNames = rows.Columns() - vars = make([]interface{}, len(columnTypes)) - ) - for i := range columnTypes { - vars[i] = reflect.New(columnTypes[i].ScanType()).Interface() - } - // when group by is applied, each combination of cartesian product - // of attributes is separate series. each item in metricPointsMap - // represent a unique series. - metricPointsMap := make(map[string][]basemodel.MetricPoint) - // attribute key-value pairs for each group selection - attributesMap := make(map[string]map[string]string) - - defer rows.Close() - for rows.Next() { - if err := rows.Scan(vars...); err != nil { - return nil, "", err - } - var groupBy []string - var metricPoint basemodel.MetricPoint - groupAttributes := make(map[string]string) - // Assuming that the end result row contains a timestamp, value and option labels - // Label key and value are both strings. 
- for idx, v := range vars { - colName := columnNames[idx] - switch v := v.(type) { - case *string: - // special case for returning all labels - if colName == "fullLabels" { - var metric map[string]string - err := json.Unmarshal([]byte(*v), &metric) - if err != nil { - return nil, "", err - } - for key, val := range metric { - groupBy = append(groupBy, val) - groupAttributes[key] = val - } - } else { - groupBy = append(groupBy, *v) - groupAttributes[colName] = *v - } - case *time.Time: - metricPoint.Timestamp = v.UnixMilli() - case *float64: - metricPoint.Value = *v - case **float64: - // ch seems to return this type when column is derived from - // SELECT count(*)/ SELECT count(*) - floatVal := *v - if floatVal != nil { - metricPoint.Value = *floatVal - } - case *float32: - float32Val := float32(*v) - metricPoint.Value = float64(float32Val) - case *uint8, *uint64, *uint16, *uint32: - if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok { - metricPoint.Value = float64(reflect.ValueOf(v).Elem().Uint()) - } else { - groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())) - groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()) - } - case *int8, *int16, *int32, *int64: - if _, ok := baseconst.ReservedColumnTargetAliases[colName]; ok { - metricPoint.Value = float64(reflect.ValueOf(v).Elem().Int()) - } else { - groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())) - groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()) - } - default: - zap.L().Error("invalid var found in metric builder query result", zap.Any("var", v), zap.String("colName", colName)) - } - } - sort.Strings(groupBy) - key := strings.Join(groupBy, "") - attributesMap[key] = groupAttributes - metricPointsMap[key] = append(metricPointsMap[key], metricPoint) - } - - var seriesList []*basemodel.Series - for key := range metricPointsMap { - points := metricPointsMap[key] - // first point in each series could be invalid since the - // aggregations are applied with point from prev series - if len(points) != 0 && len(points) > 1 { - points = points[1:] - } - attributes := attributesMap[key] - series := basemodel.Series{Labels: attributes, Points: points} - seriesList = append(seriesList, &series) - } - // err = r.conn.Exec(ctx, "DROP TEMPORARY TABLE IF EXISTS getSubTreeSpans"+hash) - // if err != nil { - // zap.L().Error("Error in dropping temporary table: ", err) - // return nil, err - // } - if hash == "" { - return seriesList, hash, nil - } else { - return seriesList, "getSubTreeSpans" + hash, nil - } -} - -func (r *ClickhouseReader) getSubTreeSpansCustomFunction(ctx context.Context, query string, hash string) (string, string, error) { - - zap.L().Debug("Executing getSubTreeSpans function") - - // str1 := `select fromUnixTimestamp64Milli(intDiv( toUnixTimestamp64Milli ( timestamp ), 100) * 100) AS interval, toFloat64(count()) as count from (select timestamp, spanId, parentSpanId, durationNano from getSubTreeSpans(select * from signoz_traces.signoz_index_v2 where serviceName='frontend' and name='/driver.DriverService/FindNearest' and traceID='00000000000000004b0a863cb5ed7681') where name='FindDriverIDs' group by interval order by interval asc;` - - // process the query to fetch subTree query - var subtreeInput string - query, subtreeInput, hash = processQuery(query, hash) - - err := r.conn.Exec(ctx, "DROP TABLE IF EXISTS getSubTreeSpans"+hash) - if err != nil { - zap.L().Error("Error in dropping temporary table", zap.Error(err)) - 
return query, hash, err - } - - // Create temporary table to store the getSubTreeSpans() results - zap.L().Debug("Creating temporary table getSubTreeSpans", zap.String("hash", hash)) - err = r.conn.Exec(ctx, "CREATE TABLE IF NOT EXISTS "+"getSubTreeSpans"+hash+" (timestamp DateTime64(9) CODEC(DoubleDelta, LZ4), traceID FixedString(32) CODEC(ZSTD(1)), spanID String CODEC(ZSTD(1)), parentSpanID String CODEC(ZSTD(1)), rootSpanID String CODEC(ZSTD(1)), serviceName LowCardinality(String) CODEC(ZSTD(1)), name LowCardinality(String) CODEC(ZSTD(1)), rootName LowCardinality(String) CODEC(ZSTD(1)), durationNano UInt64 CODEC(T64, ZSTD(1)), kind Int8 CODEC(T64, ZSTD(1)), tagMap Map(LowCardinality(String), String) CODEC(ZSTD(1)), events Array(String) CODEC(ZSTD(2))) ENGINE = MergeTree() ORDER BY (timestamp)") - if err != nil { - zap.L().Error("Error in creating temporary table", zap.Error(err)) - return query, hash, err - } - - var getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse - getSpansSubQuery := subtreeInput - // Execute the subTree query - zap.L().Debug("Executing subTree query", zap.String("query", getSpansSubQuery)) - err = r.conn.Select(ctx, &getSpansSubQueryDBResponses, getSpansSubQuery) - - // zap.L().Info(getSpansSubQuery) - - if err != nil { - zap.L().Error("Error in processing sql query", zap.Error(err)) - return query, hash, fmt.Errorf("error in processing sql query") - } - - var searchScanResponses []basemodel.SearchSpanDBResponseItem - - // TODO : @ankit: I think the algorithm does not need to assume that subtrees are from the same TraceID. We can take this as an improvement later. - // Fetch all the spans from of same TraceID so that we can build subtree - modelQuery := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable) - - if len(getSpansSubQueryDBResponses) == 0 { - return query, hash, fmt.Errorf("no spans found for the given query") - } - zap.L().Debug("Executing query to fetch all the spans from the same TraceID: ", zap.String("modelQuery", modelQuery)) - err = r.conn.Select(ctx, &searchScanResponses, modelQuery, getSpansSubQueryDBResponses[0].TraceID) - - if err != nil { - zap.L().Error("Error in processing sql query", zap.Error(err)) - return query, hash, fmt.Errorf("error in processing sql query") - } - - // Process model to fetch the spans - zap.L().Debug("Processing model to fetch the spans") - searchSpanResponses := []basemodel.SearchSpanResponseItem{} - for _, item := range searchScanResponses { - var jsonItem basemodel.SearchSpanResponseItem - json.Unmarshal([]byte(item.Model), &jsonItem) - jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano()) - if jsonItem.Events == nil { - jsonItem.Events = []string{} - } - searchSpanResponses = append(searchSpanResponses, jsonItem) - } - // Build the subtree and store all the subtree spans in temporary table getSubTreeSpans+hash - // Use map to store pointer to the spans to avoid duplicates and save memory - zap.L().Debug("Building the subtree to store all the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash)) - - treeSearchResponse, err := getSubTreeAlgorithm(searchSpanResponses, getSpansSubQueryDBResponses) - if err != nil { - zap.L().Error("Error in getSubTreeAlgorithm function", zap.Error(err)) - return query, hash, err - } - zap.L().Debug("Preparing batch to store subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash)) - statement, err := r.conn.PrepareBatch(context.Background(), fmt.Sprintf("INSERT INTO 
getSubTreeSpans"+hash)) - if err != nil { - zap.L().Error("Error in preparing batch statement", zap.Error(err)) - return query, hash, err - } - for _, span := range treeSearchResponse { - var parentID string - if len(span.References) > 0 && span.References[0].RefType == "CHILD_OF" { - parentID = span.References[0].SpanId - } - err = statement.Append( - time.Unix(0, int64(span.TimeUnixNano)), - span.TraceID, - span.SpanID, - parentID, - span.RootSpanID, - span.ServiceName, - span.Name, - span.RootName, - uint64(span.DurationNano), - int8(span.Kind), - span.TagMap, - span.Events, - ) - if err != nil { - zap.L().Error("Error in processing sql query", zap.Error(err)) - return query, hash, err - } - } - zap.L().Debug("Inserting the subtree spans in temporary table getSubTreeSpans", zap.String("hash", hash)) - err = statement.Send() - if err != nil { - zap.L().Error("Error in sending statement", zap.Error(err)) - return query, hash, err - } - return query, hash, nil -} - -//lint:ignore SA4009 return hash is feeded to the query -func processQuery(query string, hash string) (string, string, string) { - re3 := regexp.MustCompile(`getSubTreeSpans`) - - submatchall3 := re3.FindAllStringIndex(query, -1) - getSubtreeSpansMatchIndex := submatchall3[0][1] - - query2countParenthesis := query[getSubtreeSpansMatchIndex:] - - sqlCompleteIndex := 0 - countParenthesisImbalance := 0 - for i, char := range query2countParenthesis { - - if string(char) == "(" { - countParenthesisImbalance += 1 - } - if string(char) == ")" { - countParenthesisImbalance -= 1 - } - if countParenthesisImbalance == 0 { - sqlCompleteIndex = i - break - } - } - subtreeInput := query2countParenthesis[1:sqlCompleteIndex] - - // hash the subtreeInput - hmd5 := md5.Sum([]byte(subtreeInput)) - hash = fmt.Sprintf("%x", hmd5) - - // Reformat the query to use the getSubTreeSpans function - query = query[:getSubtreeSpansMatchIndex] + hash + " " + query2countParenthesis[sqlCompleteIndex+1:] - return query, subtreeInput, hash -} - -// getSubTreeAlgorithm is an algorithm to build the subtrees of the spans and return the list of spans -func getSubTreeAlgorithm(payload []basemodel.SearchSpanResponseItem, getSpansSubQueryDBResponses []model.GetSpansSubQueryDBResponse) (map[string]*basemodel.SearchSpanResponseItem, error) { - - var spans []*model.SpanForTraceDetails - for _, spanItem := range payload { - var parentID string - if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" { - parentID = spanItem.References[0].SpanId - } - span := &model.SpanForTraceDetails{ - TimeUnixNano: spanItem.TimeUnixNano, - SpanID: spanItem.SpanID, - TraceID: spanItem.TraceID, - ServiceName: spanItem.ServiceName, - Name: spanItem.Name, - Kind: spanItem.Kind, - DurationNano: spanItem.DurationNano, - TagMap: spanItem.TagMap, - ParentID: parentID, - Events: spanItem.Events, - HasError: spanItem.HasError, - } - spans = append(spans, span) - } - - zap.L().Debug("Building Tree") - roots, err := buildSpanTrees(&spans) - if err != nil { - return nil, err - } - searchSpansResult := make(map[string]*basemodel.SearchSpanResponseItem) - // Every span which was fetched from getSubTree Input SQL query is considered root - // For each root, get the subtree spans - for _, getSpansSubQueryDBResponse := range getSpansSubQueryDBResponses { - targetSpan := &model.SpanForTraceDetails{} - // zap.L().Debug("Building tree for span id: " + getSpansSubQueryDBResponse.SpanID + " " + strconv.Itoa(i+1) + " of " + strconv.Itoa(len(getSpansSubQueryDBResponses))) - // Search 
target span object in the tree - for _, root := range roots { - targetSpan, err = breadthFirstSearch(root, getSpansSubQueryDBResponse.SpanID) - if targetSpan != nil { - break - } - if err != nil { - zap.L().Error("Error during BreadthFirstSearch()", zap.Error(err)) - return nil, err - } - } - if targetSpan == nil { - return nil, nil - } - // Build subtree for the target span - // Mark the target span as root by setting parent ID as empty string - targetSpan.ParentID = "" - preParents := []*model.SpanForTraceDetails{targetSpan} - children := []*model.SpanForTraceDetails{} - - // Get the subtree child spans - for i := 0; len(preParents) != 0; i++ { - parents := []*model.SpanForTraceDetails{} - for _, parent := range preParents { - children = append(children, parent.Children...) - parents = append(parents, parent.Children...) - } - preParents = parents - } - - resultSpans := children - // Add the target span to the result spans - resultSpans = append(resultSpans, targetSpan) - - for _, item := range resultSpans { - references := []basemodel.OtelSpanRef{ - { - TraceId: item.TraceID, - SpanId: item.ParentID, - RefType: "CHILD_OF", - }, - } - - if item.Events == nil { - item.Events = []string{} - } - searchSpansResult[item.SpanID] = &basemodel.SearchSpanResponseItem{ - TimeUnixNano: item.TimeUnixNano, - SpanID: item.SpanID, - TraceID: item.TraceID, - ServiceName: item.ServiceName, - Name: item.Name, - Kind: item.Kind, - References: references, - DurationNano: item.DurationNano, - TagMap: item.TagMap, - Events: item.Events, - HasError: item.HasError, - RootSpanID: getSpansSubQueryDBResponse.SpanID, - RootName: targetSpan.Name, - } - } - } - return searchSpansResult, nil -} diff --git a/frontend/package.json b/frontend/package.json index 51097f7696..a9119d0e63 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -207,7 +207,6 @@ "eslint-plugin-sonarjs": "^0.12.0", "husky": "^7.0.4", "is-ci": "^3.0.1", - "jest-playwright-preset": "^1.7.2", "jest-styled-components": "^7.0.8", "lint-staged": "^12.5.0", "msw": "1.3.2", diff --git a/frontend/public/locales/en-GB/services.json b/frontend/public/locales/en-GB/services.json index 4c49847031..f04c851759 100644 --- a/frontend/public/locales/en-GB/services.json +++ b/frontend/public/locales/en-GB/services.json @@ -1,3 +1,3 @@ { - "rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support." + "rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support or " } diff --git a/frontend/public/locales/en/services.json b/frontend/public/locales/en/services.json index 4c49847031..f04c851759 100644 --- a/frontend/public/locales/en/services.json +++ b/frontend/public/locales/en/services.json @@ -1,3 +1,3 @@ { - "rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support." + "rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. 
Please reach out to us via Intercom support or " } diff --git a/frontend/src/container/BillingContainer/BillingContainer.styles.scss b/frontend/src/container/BillingContainer/BillingContainer.styles.scss index e4c7deec06..2bc41d89e6 100644 --- a/frontend/src/container/BillingContainer/BillingContainer.styles.scss +++ b/frontend/src/container/BillingContainer/BillingContainer.styles.scss @@ -50,6 +50,13 @@ align-items: center; } } + + .billing-update-note { + text-align: left; + font-size: 13px; + color: var(--bg-vanilla-200); + margin-top: 16px; + } } .ant-skeleton.ant-skeleton-element.ant-skeleton-active { @@ -75,5 +82,9 @@ } } } + + .billing-update-note { + color: var(--bg-ink-200); + } } } diff --git a/frontend/src/container/BillingContainer/BillingContainer.tsx b/frontend/src/container/BillingContainer/BillingContainer.tsx index e366f068b2..449474a429 100644 --- a/frontend/src/container/BillingContainer/BillingContainer.tsx +++ b/frontend/src/container/BillingContainer/BillingContainer.tsx @@ -348,7 +348,12 @@ export default function BillingContainer(): JSX.Element { const BillingUsageGraphCallback = useCallback( () => !isLoading && !isFetchingBillingData ? ( - + <> + +
+ Note: Billing metrics are updated once every 24 hours. +
+ ) : ( diff --git a/frontend/src/container/CreateAlertRule/defaults.ts b/frontend/src/container/CreateAlertRule/defaults.ts index 20cd020158..f9735e7644 100644 --- a/frontend/src/container/CreateAlertRule/defaults.ts +++ b/frontend/src/container/CreateAlertRule/defaults.ts @@ -65,7 +65,7 @@ export const logAlertDefaults: AlertDef = { chQueries: { A: { name: 'A', - query: `select \ntoStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 MINUTE) AS interval, \ntoFloat64(count()) as value \nFROM signoz_logs.distributed_logs \nWHERE timestamp BETWEEN {{.start_timestamp_nano}} AND {{.end_timestamp_nano}} \nGROUP BY interval;\n\n-- available variables:\n-- \t{{.start_timestamp_nano}}\n-- \t{{.end_timestamp_nano}}\n\n-- required columns (or alias):\n-- \tvalue\n-- \tinterval`, + query: `select \ntoStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 MINUTE) AS interval, \ntoFloat64(count()) as value \nFROM signoz_logs.distributed_logs_v2 \nWHERE timestamp BETWEEN {{.start_timestamp_nano}} AND {{.end_timestamp_nano}} \nGROUP BY interval;\n\n-- available variables:\n-- \t{{.start_timestamp_nano}}\n-- \t{{.end_timestamp_nano}}\n\n-- required columns (or alias):\n-- \tvalue\n-- \tinterval`, legend: '', disabled: false, }, @@ -95,7 +95,7 @@ export const traceAlertDefaults: AlertDef = { chQueries: { A: { name: 'A', - query: `SELECT \n\ttoStartOfInterval(timestamp, INTERVAL 1 MINUTE) AS interval, \n\ttagMap['peer.service'] AS op_name, \n\ttoFloat64(avg(durationNano)) AS value \nFROM signoz_traces.distributed_signoz_index_v2 \nWHERE tagMap['peer.service']!='' \nAND timestamp BETWEEN {{.start_datetime}} AND {{.end_datetime}} \nGROUP BY (op_name, interval);\n\n-- available variables:\n-- \t{{.start_datetime}}\n-- \t{{.end_datetime}}\n\n-- required column alias:\n-- \tvalue\n-- \tinterval`, + query: `SELECT \n\ttoStartOfInterval(timestamp, INTERVAL 1 MINUTE) AS interval, \n\tstringTagMap['peer.service'] AS op_name, \n\ttoFloat64(avg(durationNano)) AS value \nFROM signoz_traces.distributed_signoz_index_v2 \nWHERE stringTagMap['peer.service']!='' \nAND timestamp BETWEEN {{.start_datetime}} AND {{.end_datetime}} \nGROUP BY (op_name, interval);\n\n-- available variables:\n-- \t{{.start_datetime}}\n-- \t{{.end_datetime}}\n\n-- required column alias:\n-- \tvalue\n-- \tinterval`, legend: '', disabled: false, }, diff --git a/frontend/src/container/GridCardLayout/GridCard/FullView/index.tsx b/frontend/src/container/GridCardLayout/GridCard/FullView/index.tsx index 7511a4d445..974a35a39c 100644 --- a/frontend/src/container/GridCardLayout/GridCard/FullView/index.tsx +++ b/frontend/src/container/GridCardLayout/GridCard/FullView/index.tsx @@ -140,7 +140,7 @@ function FullView({ const [graphsVisibilityStates, setGraphsVisibilityStates] = useState< boolean[] - >(Array(response.data?.payload.data.result.length).fill(true)); + >(Array(response.data?.payload?.data?.result?.length).fill(true)); useEffect(() => { const { diff --git a/frontend/src/container/LogsExplorerViews/index.tsx b/frontend/src/container/LogsExplorerViews/index.tsx index 9901243b2f..8dc46c5a5a 100644 --- a/frontend/src/container/LogsExplorerViews/index.tsx +++ b/frontend/src/container/LogsExplorerViews/index.tsx @@ -133,6 +133,9 @@ function LogsExplorerViews({ // State const [page, setPage] = useState(1); const [logs, setLogs] = useState([]); + const [lastLogLineTimestamp, setLastLogLineTimestamp] = useState< + number | string | null + >(); const [requestData, setRequestData] = useState(null); const [showFormatMenuItems, 
setShowFormatMenuItems] = useState(false);
 	const [queryId, setQueryId] = useState(v4());
@@ -270,6 +273,14 @@ function LogsExplorerViews({
 					start: minTime,
 					end: maxTime,
 				}),
+				// send lastLogLineTimestamp only when the panel type is list and the query is ordered by timestamp desc
+				lastLogLineTimestamp:
+					panelType === PANEL_TYPES.LIST &&
+					requestData?.builder?.queryData?.[0]?.orderBy?.[0]?.columnName ===
+						'timestamp' &&
+					requestData?.builder?.queryData?.[0]?.orderBy?.[0]?.order === 'desc'
+						? lastLogLineTimestamp
+						: undefined,
 			},
 			undefined,
 			listQueryKeyRef,
@@ -347,6 +358,10 @@
 			pageSize: nextPageSize,
 		});

+		// remember the timestamp of the last log line fetched so far; it pins the end of
+		// the next page's query window, and is reset to null whenever the data changes
+		setLastLogLineTimestamp(lastLog.timestamp);
+
 		setPage((prevPage) => prevPage + 1);

 		setRequestData(newRequestData);
@@ -539,6 +554,11 @@
 		// eslint-disable-next-line react-hooks/exhaustive-deps
 	}, [data]);

+	useEffect(() => {
+		// clear the lastLogLineTimestamp when the data changes
+		setLastLogLineTimestamp(null);
+	}, [data]);
+
 	useEffect(() => {
 		if (
 			requestData?.id !== stagedQuery?.id ||
diff --git a/frontend/src/container/NewWidget/RightContainer/constants.ts b/frontend/src/container/NewWidget/RightContainer/constants.ts
index 171f6b81d3..03cee96d21 100644
--- a/frontend/src/container/NewWidget/RightContainer/constants.ts
+++ b/frontend/src/container/NewWidget/RightContainer/constants.ts
@@ -74,7 +74,7 @@ export const panelTypeVsYAxisUnit: { [key in PANEL_TYPES]: boolean } = {
 	[PANEL_TYPES.VALUE]: true,
 	[PANEL_TYPES.TABLE]: false,
 	[PANEL_TYPES.LIST]: false,
-	[PANEL_TYPES.PIE]: false,
+	[PANEL_TYPES.PIE]: true,
 	[PANEL_TYPES.BAR]: true,
 	[PANEL_TYPES.HISTOGRAM]: false,
 	[PANEL_TYPES.TRACE]: false,
diff --git a/frontend/src/container/NewWidget/RightContainer/index.tsx b/frontend/src/container/NewWidget/RightContainer/index.tsx
index 84400737d4..43e3b5611d 100644
--- a/frontend/src/container/NewWidget/RightContainer/index.tsx
+++ b/frontend/src/container/NewWidget/RightContainer/index.tsx
@@ -211,7 +211,11 @@ function RightContainer({ )} {allowSoftMinMax && (
diff --git a/frontend/src/container/PanelWrapper/PiePanelWrapper.tsx b/frontend/src/container/PanelWrapper/PiePanelWrapper.tsx
index a176247781..dce84ad78d 100644
--- a/frontend/src/container/PanelWrapper/PiePanelWrapper.tsx
+++ b/frontend/src/container/PanelWrapper/PiePanelWrapper.tsx
@@ -4,6 +4,7 @@ import { Color } from '@signozhq/design-tokens';
 import { Group } from '@visx/group';
 import { Pie } from '@visx/shape';
 import { useTooltip, useTooltipInPortal } from '@visx/tooltip';
+import { getYAxisFormattedValue } from 'components/Graph/yAxisConfig';
 import { themeColors } from 'constants/theme';
 import { useIsDarkMode } from 'hooks/useDarkMode';
 import { generateColor } from 'lib/uPlotLib/utils/generateColor';
@@ -129,7 +130,12 @@
 				showTooltip({
 					tooltipData: {
 						label,
-						value: arc.data.value,
+						// format only the displayed value; the arc allotment must keep the raw
+						// number, since formatted strings would treat '4K' as smaller than '40'
+						value: getYAxisFormattedValue(
+							arc.data.value,
+							widget?.yAxisUnit || 'none',
+						),
 						color: arc.data.color,
 						key: label,
 					},
diff --git a/frontend/src/container/ServiceApplication/ServiceMetrics/ServiceMetricTable.tsx b/frontend/src/container/ServiceApplication/ServiceMetrics/ServiceMetricTable.tsx
index 6430cc9c8f..1a3b99d6dd 100644
--- 
a/frontend/src/container/ServiceApplication/ServiceMetrics/ServiceMetricTable.tsx +++ b/frontend/src/container/ServiceApplication/ServiceMetrics/ServiceMetricTable.tsx @@ -92,9 +92,10 @@ function ServiceMetricTable({ return ( <> {RPS > MAX_RPS_LIMIT && ( - + {getText('rps_over_100')} + email )} diff --git a/frontend/src/container/ServiceApplication/ServiceTraces/ServiceTracesTable.tsx b/frontend/src/container/ServiceApplication/ServiceTraces/ServiceTracesTable.tsx index 6633b7a1aa..42d22e8980 100644 --- a/frontend/src/container/ServiceApplication/ServiceTraces/ServiceTracesTable.tsx +++ b/frontend/src/container/ServiceApplication/ServiceTraces/ServiceTracesTable.tsx @@ -49,10 +49,11 @@ function ServiceTraceTable({ return ( <> {RPS > MAX_RPS_LIMIT && ( - - + + {getText('rps_over_100')} - + email + )} diff --git a/frontend/src/container/SideNav/menuItems.tsx b/frontend/src/container/SideNav/menuItems.tsx index be694227a1..6d24b74c53 100644 --- a/frontend/src/container/SideNav/menuItems.tsx +++ b/frontend/src/container/SideNav/menuItems.tsx @@ -1,7 +1,6 @@ import { RocketOutlined } from '@ant-design/icons'; import ROUTES from 'constants/routes'; import { - AreaChart, BarChart2, BellDot, BugIcon, @@ -114,11 +113,6 @@ const menuItems: SidebarItem[] = [ icon: , isBeta: true, }, - { - key: ROUTES.USAGE_EXPLORER, - label: 'Usage Explorer', - icon: , - }, { key: ROUTES.BILLING, label: 'Billing', diff --git a/frontend/src/hooks/queryBuilder/useFetchKeysAndValues.ts b/frontend/src/hooks/queryBuilder/useFetchKeysAndValues.ts index 6fd42175ad..7b99b9d250 100644 --- a/frontend/src/hooks/queryBuilder/useFetchKeysAndValues.ts +++ b/frontend/src/hooks/queryBuilder/useFetchKeysAndValues.ts @@ -70,7 +70,7 @@ export const useFetchKeysAndValues = ( const queryFiltersWithoutId = useMemo( () => ({ ...query.filters, - items: query.filters.items.map((item) => { + items: query.filters?.items?.map((item) => { const filterWithoutId = cloneDeep(item); unset(filterWithoutId, 'id'); return filterWithoutId; diff --git a/frontend/src/lib/dashboard/prepareQueryRangePayload.ts b/frontend/src/lib/dashboard/prepareQueryRangePayload.ts index 181a83914b..ffc2f0477c 100644 --- a/frontend/src/lib/dashboard/prepareQueryRangePayload.ts +++ b/frontend/src/lib/dashboard/prepareQueryRangePayload.ts @@ -1,6 +1,7 @@ import getStartEndRangeTime from 'lib/getStartEndRangeTime'; import getStep from 'lib/getStep'; import { mapQueryDataToApi } from 'lib/newQueryBuilder/queryBuilderMappers/mapQueryDataToApi'; +import { isUndefined } from 'lodash-es'; import store from 'store'; import { QueryRangePayload } from 'types/api/metrics/getQueryRange'; import { EQueryType } from 'types/common/dashboard'; @@ -24,7 +25,11 @@ export const prepareQueryRangePayload = ({ fillGaps = false, }: GetQueryResultsProps): PrepareQueryRangePayload => { let legendMap: Record = {}; - const { allowSelectedIntervalForStepGen, ...restParams } = params; + const { + allowSelectedIntervalForStepGen, + lastLogLineTimestamp, + ...restParams + } = params; const compositeQuery: QueryRangePayload['compositeQuery'] = { queryType: query.queryType, @@ -90,9 +95,13 @@ export const prepareQueryRangePayload = ({ interval: globalSelectedInterval, }); + const endLogTimeStamp = !isUndefined(lastLogLineTimestamp) + ? 
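+			// lastLogLineTimestamp pins the end of the query range to the last rendered
+			// log line, so paginated results stay stable while new logs keep arriving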
new Date(lastLogLineTimestamp as string | number)?.getTime() || undefined + : undefined; + const queryPayload: QueryRangePayload = { start: parseInt(start, 10) * 1e3, - end: parseInt(end, 10) * 1e3, + end: endLogTimeStamp || parseInt(end, 10) * 1e3, step: getStep({ start: allowSelectedIntervalForStepGen ? start diff --git a/frontend/src/pages/LogsExplorer/index.tsx b/frontend/src/pages/LogsExplorer/index.tsx index 9e23b34c2c..5e4d1cf55f 100644 --- a/frontend/src/pages/LogsExplorer/index.tsx +++ b/frontend/src/pages/LogsExplorer/index.tsx @@ -67,7 +67,7 @@ function LogsExplorer(): JSX.Element { } if ( currentQuery.builder.queryData.length === 1 && - currentQuery.builder.queryData[0].groupBy.length > 0 + currentQuery.builder.queryData?.[0]?.groupBy?.length > 0 ) { handleChangeSelectedView(SELECTED_VIEWS.QUERY_BUILDER); } diff --git a/frontend/src/pages/Support/Support.tsx b/frontend/src/pages/Support/Support.tsx index 0dbd7a9526..9d3d8fff8f 100644 --- a/frontend/src/pages/Support/Support.tsx +++ b/frontend/src/pages/Support/Support.tsx @@ -83,7 +83,7 @@ const supportChannels = [ name: 'Schedule a call', icon: , title: 'Schedule a call with the founders.', - url: 'https://calendly.com/pranay-signoz/signoz-intro-calls', + url: 'https://calendly.com/vishal-signoz/30min', btnText: 'Schedule call', }, { diff --git a/frontend/src/periscope/components/KeyValueLabel/KeyValueLabel.styles.scss b/frontend/src/periscope/components/KeyValueLabel/KeyValueLabel.styles.scss index 88ae57f4e8..e325b85113 100644 --- a/frontend/src/periscope/components/KeyValueLabel/KeyValueLabel.styles.scss +++ b/frontend/src/periscope/components/KeyValueLabel/KeyValueLabel.styles.scss @@ -12,6 +12,10 @@ font-weight: 400; line-height: 18px; letter-spacing: -0.005em; + &, + &:hover { + color: var(--text-vanilla-400); + } } &__key { background: var(--bg-ink-400); @@ -20,13 +24,15 @@ &__value { background: var(--bg-slate-400); } - color: var(--text-vanilla-400); } .lightMode { .key-value-label { border-color: var(--bg-vanilla-400); - color: var(--text-ink-400); + &__key, + &__value { + color: var(--text-ink-400); + } &__key { background: var(--bg-vanilla-300); } diff --git a/frontend/src/periscope/components/KeyValueLabel/KeyValueLabel.tsx b/frontend/src/periscope/components/KeyValueLabel/KeyValueLabel.tsx index 377c647a3f..c0987ccff1 100644 --- a/frontend/src/periscope/components/KeyValueLabel/KeyValueLabel.tsx +++ b/frontend/src/periscope/components/KeyValueLabel/KeyValueLabel.tsx @@ -1,6 +1,7 @@ import './KeyValueLabel.styles.scss'; import { Tooltip } from 'antd'; +import { useMemo } from 'react'; import TrimmedText from '../TrimmedText/TrimmedText'; @@ -15,19 +16,33 @@ export default function KeyValueLabel({ badgeValue, maxCharacters = 20, }: KeyValueLabelProps): JSX.Element | null { + const isUrl = useMemo(() => /^https?:\/\//.test(badgeValue), [badgeValue]); + if (!badgeKey || !badgeValue) { return null; } + return (
<div className="key-value-label"> <div className="key-value-label__key"> <TrimmedText text={badgeKey} maxCharacters={maxCharacters} /> </div> - <Tooltip title={badgeValue}> - <div className="key-value-label__value"> - <TrimmedText text={badgeValue} maxCharacters={maxCharacters} /> - </div> - </Tooltip> + {isUrl ? ( + <a href={badgeValue} target="_blank" rel="noopener noreferrer" className="key-value-label__value"> + <TrimmedText text={badgeValue} maxCharacters={maxCharacters} /> + </a> + ) : ( + <Tooltip title={badgeValue}> + <div className="key-value-label__value"> + <TrimmedText text={badgeValue} maxCharacters={maxCharacters} /> + </div> + </Tooltip> + )} </div>
); } diff --git a/frontend/yarn.lock b/frontend/yarn.lock index 2ef8b540e0..1c501a08d3 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -169,7 +169,7 @@ resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.23.5.tgz#ffb878728bb6bdcb6f4510aa51b1be9afb8cfd98" integrity sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw== -"@babel/core@^7.1.0", "@babel/core@^7.12.3", "@babel/core@^7.16.0", "@babel/core@^7.7.2", "@babel/core@^7.7.5", "@babel/core@^7.8.0": +"@babel/core@^7.1.0", "@babel/core@^7.12.3", "@babel/core@^7.16.0", "@babel/core@^7.7.2", "@babel/core@^7.8.0": version "7.21.4" resolved "https://registry.npmjs.org/@babel/core/-/core-7.21.4.tgz" integrity sha512-qt/YV149Jman/6AfmlxJ04LMIu8bMoyl3RB91yTFrxQmgbrSvQMy7cI8Q62FHx1t8wJ8B5fu0UDoLwHAhUo1QA== @@ -2659,18 +2659,6 @@ dependencies: tslib "2.5.0" -"@hapi/hoek@^9.0.0": - version "9.3.0" - resolved "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz" - integrity sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ== - -"@hapi/topo@^5.0.0": - version "5.1.0" - resolved "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz" - integrity sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg== - dependencies: - "@hapi/hoek" "^9.0.0" - "@humanwhocodes/config-array@^0.5.0": version "0.5.0" resolved "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.5.0.tgz" @@ -3751,23 +3739,6 @@ unplugin "1.0.1" uuid "^9.0.0" -"@sideway/address@^4.1.3": - version "4.1.4" - resolved "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz" - integrity sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw== - dependencies: - "@hapi/hoek" "^9.0.0" - -"@sideway/formula@^3.0.1": - version "3.0.1" - resolved "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz" - integrity sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg== - -"@sideway/pinpoint@^2.0.0": - version "2.0.0" - resolved "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz" - integrity sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ== - "@signozhq/design-tokens@0.0.8": version "0.0.8" resolved "https://registry.yarnpkg.com/@signozhq/design-tokens/-/design-tokens-0.0.8.tgz#368dc92cfe01d0cd893df140445c5d9dfd944a88" @@ -4591,13 +4562,6 @@ resolved "https://registry.npmjs.org/@types/uuid/-/uuid-8.3.4.tgz" integrity sha512-c/I8ZRb51j+pYGAu5CrFMRxqZ2ke4y2grEBO5AUjgSkSk+qT2Ea+OdWElz/OiMf5MNpn2b17kuVBwZLQJXzihw== -"@types/wait-on@^5.2.0": - version "5.3.1" - resolved "https://registry.npmjs.org/@types/wait-on/-/wait-on-5.3.1.tgz" - integrity sha512-2FFOKCF/YydrMUaqg+fkk49qf0e5rDgwt6aQsMzFQzbS419h2gNOXyiwp/o2yYy27bi/C1z+HgfncryjGzlvgQ== - dependencies: - "@types/node" "*" - "@types/webpack-dev-server@^4.7.2": version "4.7.2" resolved "https://registry.yarnpkg.com/@types/webpack-dev-server/-/webpack-dev-server-4.7.2.tgz#a12d9881aa23cdd4cecbb2d31fa784a45c4967e0" @@ -5428,18 +5392,6 @@ anymatch@^3.0.3, anymatch@~3.1.2: normalize-path "^3.0.0" picomatch "^2.0.4" -append-transform@^2.0.0: - version "2.0.0" - resolved "https://registry.npmjs.org/append-transform/-/append-transform-2.0.0.tgz" - integrity sha512-7yeyCEurROLQJFv5Xj4lEGTy0borxepjFv1g22oAdqFu//SrAlDl1O1Nxx15SH1RoliUml6p8dwJW9jvZughhg== - dependencies: - default-require-extensions "^3.0.0" - -archy@^1.0.0: - version 
"1.0.0" - resolved "https://registry.npmjs.org/archy/-/archy-1.0.0.tgz" - integrity sha512-Xg+9RwCg/0p32teKdGMPTPnVXKD0w3DfHnFTficozsAgsvq2XenPJq/MYpzzQ/v8zrOyJn6Ds39VA4JIDwFfqw== - arg@^4.1.0: version "4.1.3" resolved "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz" @@ -5635,13 +5587,6 @@ axios@1.7.4: form-data "^4.0.0" proxy-from-env "^1.1.0" -axios@^0.21.1: - version "0.21.4" - resolved "https://registry.yarnpkg.com/axios/-/axios-0.21.4.tgz#c67b90dc0568e5c1cf2b0b858c43ba28e2eda575" - integrity sha512-ut5vewkiu8jjGBdqpM44XxjuCjq9LAKeHVmoVfHVzy8eHgxxq8SbAVQNovDA8mVi05kP0Ea/n/UzcSHcTJQfNg== - dependencies: - follow-redirects "^1.14.0" - axobject-query@^3.1.1: version "3.1.1" resolved "https://registry.npmjs.org/axobject-query/-/axobject-query-3.1.1.tgz" @@ -6315,16 +6260,6 @@ bytes@3.1.2: resolved "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz" integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg== -caching-transform@^4.0.0: - version "4.0.0" - resolved "https://registry.npmjs.org/caching-transform/-/caching-transform-4.0.0.tgz" - integrity sha512-kpqOvwXnjjN44D89K5ccQC+RUrsy7jB/XLlRrx0D7/2HNcTPqzsb6XgYoErwko6QsV184CA2YgS1fxDiiDZMWA== - dependencies: - hasha "^5.0.0" - make-dir "^3.0.0" - package-hash "^4.0.0" - write-file-atomic "^3.0.0" - call-bind@^1.0.0, call-bind@^1.0.2: version "1.0.2" resolved "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz" @@ -6355,7 +6290,7 @@ camelcase-keys@^6.2.2: map-obj "^4.0.0" quick-lru "^4.0.1" -camelcase@^5.0.0, camelcase@^5.3.1: +camelcase@^5.3.1: version "5.3.1" resolved "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz" integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== @@ -6620,15 +6555,6 @@ cli-width@^3.0.0: resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-3.0.0.tgz#a2f48437a2caa9a22436e794bf071ec9e61cedf6" integrity sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw== -cliui@^6.0.0: - version "6.0.0" - resolved "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz" - integrity sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ== - dependencies: - string-width "^4.2.0" - strip-ansi "^6.0.0" - wrap-ansi "^6.2.0" - cliui@^7.0.2: version "7.0.4" resolved "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz" @@ -6772,16 +6698,6 @@ commander@^10.0.0: resolved "https://registry.yarnpkg.com/commander/-/commander-10.0.1.tgz#881ee46b4f77d1c1dccc5823433aa39b022cbe06" integrity sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug== -commander@^3.0.2: - version "3.0.2" - resolved "https://registry.npmjs.org/commander/-/commander-3.0.2.tgz" - integrity sha512-Gar0ASD4BDyKC4hl4DwHqDrmvjoxWKZigVnAbn5H1owvm4CxCPdb0HQDehwNYMJpla5+M2tPmPARzhtYuwpHow== - -commander@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/commander/-/commander-5.1.0.tgz#46abbd1652f8e059bddaef99bbdcb2ad9cf179ae" - integrity sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg== - commander@^7.0.0, commander@^7.2.0: version "7.2.0" resolved "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz" @@ -6802,11 +6718,6 @@ common-path-prefix@^3.0.0: resolved "https://registry.yarnpkg.com/common-path-prefix/-/common-path-prefix-3.0.0.tgz#7d007a7e07c58c4b4d5f433131a19141b29f11e0" integrity 
sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w== -commondir@^1.0.1: - version "1.0.1" - resolved "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz" - integrity sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg== - compare-func@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/compare-func/-/compare-func-2.0.0.tgz" @@ -7073,7 +6984,7 @@ cross-spawn@^6.0.5: shebang-command "^1.2.0" which "^1.2.9" -cross-spawn@^7.0.0, cross-spawn@^7.0.1, cross-spawn@^7.0.2, cross-spawn@^7.0.3: +cross-spawn@^7.0.1, cross-spawn@^7.0.2, cross-spawn@^7.0.3: version "7.0.3" resolved "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz" integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== @@ -7303,14 +7214,6 @@ custom-event-polyfill@^1.0.6: resolved "https://registry.npmjs.org/custom-event-polyfill/-/custom-event-polyfill-1.0.7.tgz" integrity sha512-TDDkd5DkaZxZFM8p+1I3yAlvM3rSr1wbrOliG4yJiwinMZN8z/iGL7BTlDkrJcYTmgUSb4ywVCc3ZaUtOtC76w== -cwd@^0.10.0: - version "0.10.0" - resolved "https://registry.npmjs.org/cwd/-/cwd-0.10.0.tgz" - integrity sha512-YGZxdTTL9lmLkCUTpg4j0zQ7IhRB5ZmqNBbGCl3Tg6MP/d5/6sY7L5mmTjzbc6JKgVZYiqTQTNhPFsbXNGlRaA== - dependencies: - find-pkg "^0.1.2" - fs-exists-sync "^0.1.0" - "d3-array@1 - 3", "d3-array@2 - 3", "d3-array@2.10.0 - 3": version "3.2.3" resolved "https://registry.npmjs.org/d3-array/-/d3-array-3.2.3.tgz" @@ -7555,7 +7458,7 @@ decamelize-keys@^1.1.0: decamelize "^1.1.0" map-obj "^1.0.0" -decamelize@^1.1.0, decamelize@^1.2.0: +decamelize@^1.1.0: version "1.2.0" resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" integrity sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA== @@ -7637,13 +7540,6 @@ default-gateway@^6.0.3: dependencies: execa "^5.0.0" -default-require-extensions@^3.0.0: - version "3.0.1" - resolved "https://registry.npmjs.org/default-require-extensions/-/default-require-extensions-3.0.1.tgz" - integrity sha512-eXTJmRbm2TIt9MgWTsOH1wEuhew6XGZcMeGKCtLedIg/NCsg1iBePXkceTdK4Fii7pzmN9tGsZhKzZ4h7O/fxw== - dependencies: - strip-bom "^4.0.0" - defaults@^1.0.3: version "1.0.4" resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.4.tgz#b0b02062c1e2aa62ff5d9528f0f98baa90978d7a" @@ -8163,11 +8059,6 @@ es-to-primitive@^1.2.1: is-date-object "^1.0.1" is-symbol "^1.0.2" -es6-error@^4.0.1: - version "4.1.1" - resolved "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz" - integrity sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg== - escalade@^3.1.1: version "3.1.1" resolved "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz" @@ -8608,18 +8499,6 @@ exit@^0.1.2: resolved "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz" integrity sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ== -expand-tilde@^1.2.2: - version "1.2.2" - resolved "https://registry.npmjs.org/expand-tilde/-/expand-tilde-1.2.2.tgz" - integrity sha512-rtmc+cjLZqnu9dSYosX9EWmSJhTwpACgJQTfj4hgg2JjOD/6SIQalZrt4a3aQeh++oNxkazcaxrhPUj6+g5G/Q== - dependencies: - os-homedir "^1.0.1" - -expect-playwright@^0.8.0: - version "0.8.0" - resolved "https://registry.npmjs.org/expect-playwright/-/expect-playwright-0.8.0.tgz" - integrity sha512-+kn8561vHAY+dt+0gMqqj1oY+g5xWrsuGMk4QGxotT2WS545nVqqjs37z6hrYfIuucwqthzwJfCJUEYqixyljg== - expect@^27.5.1: 
version "27.5.1" resolved "https://registry.npmjs.org/expect/-/expect-27.5.1.tgz" @@ -8828,15 +8707,6 @@ finalhandler@1.2.0: statuses "2.0.1" unpipe "~1.0.0" -find-cache-dir@^3.2.0: - version "3.3.2" - resolved "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz" - integrity sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig== - dependencies: - commondir "^1.0.1" - make-dir "^3.0.2" - pkg-dir "^4.1.0" - find-cache-dir@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-4.0.0.tgz#a30ee0448f81a3990708f6453633c733e2f6eec2" @@ -8845,30 +8715,6 @@ find-cache-dir@^4.0.0: common-path-prefix "^3.0.0" pkg-dir "^7.0.0" -find-file-up@^0.1.2: - version "0.1.3" - resolved "https://registry.npmjs.org/find-file-up/-/find-file-up-0.1.3.tgz" - integrity sha512-mBxmNbVyjg1LQIIpgO8hN+ybWBgDQK8qjht+EbrTCGmmPV/sc7RF1i9stPTD6bpvXZywBdrwRYxhSdJv867L6A== - dependencies: - fs-exists-sync "^0.1.0" - resolve-dir "^0.1.0" - -find-pkg@^0.1.2: - version "0.1.2" - resolved "https://registry.npmjs.org/find-pkg/-/find-pkg-0.1.2.tgz" - integrity sha512-0rnQWcFwZr7eO0513HahrWafsc3CTFioEB7DRiEYCUM/70QXSY8f3mCST17HXLcPvEhzH/Ty/Bxd72ZZsr/yvw== - dependencies: - find-file-up "^0.1.2" - -find-process@^1.4.4: - version "1.4.7" - resolved "https://registry.npmjs.org/find-process/-/find-process-1.4.7.tgz" - integrity sha512-/U4CYp1214Xrp3u3Fqr9yNynUrr5Le4y0SsJh2lMDDSbpwYSz3M2SMWQC+wqcx79cN8PQtHQIL8KnuY9M66fdg== - dependencies: - chalk "^4.0.0" - commander "^5.1.0" - debug "^4.1.1" - find-up@^4.0.0, find-up@^4.1.0: version "4.1.0" resolved "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz" @@ -8925,7 +8771,7 @@ flubber@^0.4.2: svgpath "^2.2.1" topojson-client "^3.0.0" -follow-redirects@^1.0.0, follow-redirects@^1.14.0, follow-redirects@^1.15.6: +follow-redirects@^1.0.0, follow-redirects@^1.15.6: version "1.15.6" resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.6.tgz#7f815c0cda4249c74ff09e95ef97c23b5fd0399b" integrity sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA== @@ -8962,14 +8808,6 @@ force-graph@1: kapsule "^1.14" lodash-es "4" -foreground-child@^2.0.0: - version "2.0.0" - resolved "https://registry.npmjs.org/foreground-child/-/foreground-child-2.0.0.tgz" - integrity sha512-dCIq9FpEcyQyXKCkyzmlPTFNgrCzPudOe+mhvJU5zAtlBnGVy2yKxtfsxK2tQBThwq225jcvBjpw1Gr40uzZCA== - dependencies: - cross-spawn "^7.0.0" - signal-exit "^3.0.2" - form-data@^3.0.0: version "3.0.1" resolved "https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz" @@ -9008,16 +8846,11 @@ fresh@0.5.2: resolved "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz" integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q== -fromentries@^1.2.0, fromentries@^1.3.2: +fromentries@^1.3.2: version "1.3.2" resolved "https://registry.npmjs.org/fromentries/-/fromentries-1.3.2.tgz" integrity sha512-cHEpEQHUg0f8XdtZCc2ZAhrHzKzT0MrFUTcvx+hfxYu7rGMDc5SKoXFh+n4YigxsHXRzc6OrCshdR1bWH6HHyg== -fs-exists-sync@^0.1.0: - version "0.1.0" - resolved "https://registry.npmjs.org/fs-exists-sync/-/fs-exists-sync-0.1.0.tgz" - integrity sha512-cR/vflFyPZtrN6b38ZyWxpWdhlXrzZEBawlpBQMq7033xVY7/kg0GDMBK5jg8lDYQckdJ5x/YC88lM3C7VMsLg== - fs-extra@^10.0.0: version "10.1.0" resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz" @@ -9104,7 +8937,7 @@ geotiff@^2.0.7: web-worker "^1.2.0" xml-utils "^1.0.2" -get-caller-file@^2.0.1, get-caller-file@^2.0.5: 
+get-caller-file@^2.0.5: version "2.0.5" resolved "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz" integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== @@ -9208,24 +9041,6 @@ global-dirs@^0.1.1: dependencies: ini "^1.3.4" -global-modules@^0.2.3: - version "0.2.3" - resolved "https://registry.npmjs.org/global-modules/-/global-modules-0.2.3.tgz" - integrity sha512-JeXuCbvYzYXcwE6acL9V2bAOeSIGl4dD+iwLY9iUx2VBJJ80R18HCn+JCwHM9Oegdfya3lEkGCdaRkSyc10hDA== - dependencies: - global-prefix "^0.1.4" - is-windows "^0.2.0" - -global-prefix@^0.1.4: - version "0.1.5" - resolved "https://registry.npmjs.org/global-prefix/-/global-prefix-0.1.5.tgz" - integrity sha512-gOPiyxcD9dJGCEArAhF4Hd0BAqvAe/JzERP7tYumE4yIkmIedPUVXcJFWbV3/p/ovIIvKjkrTk+f1UVkq7vvbw== - dependencies: - homedir-polyfill "^1.0.0" - ini "^1.3.4" - is-windows "^0.2.0" - which "^1.2.12" - global@^4.3.0, global@~4.4.0: version "4.4.0" resolved "https://registry.npmjs.org/global/-/global-4.4.0.tgz" @@ -9272,7 +9087,7 @@ gopd@^1.0.1: dependencies: get-intrinsic "^1.1.3" -graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9: +graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9: version "4.2.11" resolved "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz" integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ== @@ -9345,14 +9160,6 @@ has@^1.0.3: dependencies: function-bind "^1.1.1" -hasha@^5.0.0: - version "5.2.2" - resolved "https://registry.npmjs.org/hasha/-/hasha-5.2.2.tgz" - integrity sha512-Hrp5vIK/xr5SkeN2onO32H0MgNZ0f17HRNH39WfL0SYUNOTZ5Lz1TJ8Pajo/87dYGEFlLMm7mIc/k/s6Bvz9HQ== - dependencies: - is-stream "^2.0.0" - type-fest "^0.8.0" - hasown@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c" @@ -9633,13 +9440,6 @@ hoist-non-react-statics@^3.0.0, hoist-non-react-statics@^3.1.0, hoist-non-react- dependencies: react-is "^16.7.0" -homedir-polyfill@^1.0.0: - version "1.0.3" - resolved "https://registry.npmjs.org/homedir-polyfill/-/homedir-polyfill-1.0.3.tgz" - integrity sha512-eSmmWE5bZTK2Nou4g0AI3zZ9rswp7GRKoKXS1BLUkvPviOqs4YTN1djQIqrXy9k5gEtdLPy86JjRwsNM9tnDcA== - dependencies: - parse-passwd "^1.0.0" - hosted-git-info@^2.1.4: version "2.8.9" resolved "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz" @@ -10376,16 +10176,6 @@ is-what@^3.14.1: resolved "https://registry.npmjs.org/is-what/-/is-what-3.14.1.tgz" integrity sha512-sNxgpk9793nzSs7bA6JQJGeIuRBQhAaNGG77kzYQgMkrID+lS6SlK07K5LaptscDlSaIgH+GPFzf+d75FVxozA== -is-windows@^0.2.0: - version "0.2.0" - resolved "https://registry.npmjs.org/is-windows/-/is-windows-0.2.0.tgz" - integrity sha512-n67eJYmXbniZB7RF4I/FTjK1s6RPOCTxhYrVYLRaCt3lF0mpWZPKr3T2LSZAqyjQsxR2qMmGYXXzK0YWwcPM1Q== - -is-windows@^1.0.2: - version "1.0.2" - resolved "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz" - integrity sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA== - is-wsl@^2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-2.2.0.tgz#74a4c76e77ca9fd3f932f290c17ea326cd157271" @@ -10423,23 +10213,6 @@ istanbul-lib-coverage@^3.0.0, istanbul-lib-coverage@^3.2.0: resolved 
"https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz" integrity sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw== -istanbul-lib-hook@^3.0.0: - version "3.0.0" - resolved "https://registry.npmjs.org/istanbul-lib-hook/-/istanbul-lib-hook-3.0.0.tgz" - integrity sha512-Pt/uge1Q9s+5VAZ+pCo16TYMWPBIl+oaNIjgLQxcX0itS6ueeaA+pEfThZpH8WxhFgCiEb8sAJY6MdUKgiIWaQ== - dependencies: - append-transform "^2.0.0" - -istanbul-lib-instrument@^4.0.0: - version "4.0.3" - resolved "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-4.0.3.tgz" - integrity sha512-BXgQl9kf4WTCPCCpmFGoJkz/+uhvm7h7PFKUYxh7qarQd3ER33vHG//qaE8eN25l07YqZPpHXU9I09l/RD5aGQ== - dependencies: - "@babel/core" "^7.7.5" - "@istanbuljs/schema" "^0.1.2" - istanbul-lib-coverage "^3.0.0" - semver "^6.3.0" - istanbul-lib-instrument@^5.0.4, istanbul-lib-instrument@^5.1.0: version "5.2.1" resolved "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz" @@ -10451,18 +10224,6 @@ istanbul-lib-instrument@^5.0.4, istanbul-lib-instrument@^5.1.0: istanbul-lib-coverage "^3.2.0" semver "^6.3.0" -istanbul-lib-processinfo@^2.0.2: - version "2.0.3" - resolved "https://registry.npmjs.org/istanbul-lib-processinfo/-/istanbul-lib-processinfo-2.0.3.tgz" - integrity sha512-NkwHbo3E00oybX6NGJi6ar0B29vxyvNwoC7eJ4G4Yq28UfY758Hgn/heV8VRFhevPED4LXfFz0DQ8z/0kw9zMg== - dependencies: - archy "^1.0.0" - cross-spawn "^7.0.3" - istanbul-lib-coverage "^3.2.0" - p-map "^3.0.0" - rimraf "^3.0.0" - uuid "^8.3.2" - istanbul-lib-report@^3.0.0: version "3.0.0" resolved "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz" @@ -10481,7 +10242,7 @@ istanbul-lib-source-maps@^4.0.0: istanbul-lib-coverage "^3.0.0" source-map "^0.6.1" -istanbul-reports@^3.0.2, istanbul-reports@^3.1.3: +istanbul-reports@^3.1.3: version "3.1.5" resolved "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.5.tgz" integrity sha512-nUsEMa9pBt/NOHqbcbeJEgqIlY/K7rVWUX6Lql2orY5e9roQOthbR3vtY4zzf2orPELg80fnxxk9zUyPlgwD1w== @@ -10777,39 +10538,11 @@ jest-mock@^27.5.1: "@jest/types" "^27.5.1" "@types/node" "*" -jest-playwright-preset@^1.7.2: - version "1.7.2" - resolved "https://registry.yarnpkg.com/jest-playwright-preset/-/jest-playwright-preset-1.7.2.tgz#708942c4dcc1edc85429079d2b47a9382298c454" - integrity sha512-0M7M3z342bdKQLnS70cIptlJsW+uuGptbPnqIMg4K5Vp/L/DhqdTKZK7WM4n6miAUnZdUcjXKOdQWfZW/aBo7w== - dependencies: - expect-playwright "^0.8.0" - jest-process-manager "^0.3.1" - nyc "^15.1.0" - playwright-core ">=1.2.0" - rimraf "^3.0.2" - uuid "^8.3.2" - jest-pnp-resolver@^1.2.2: version "1.2.3" resolved "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz" integrity sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w== -jest-process-manager@^0.3.1: - version "0.3.1" - resolved "https://registry.npmjs.org/jest-process-manager/-/jest-process-manager-0.3.1.tgz" - integrity sha512-x9W54UgZ7IkzUHgXtnI1x4GKOVjxtwW0CA/7yGbTHtT/YhENO0Lic2yfVyC/gekn7OIEMcQmy0L1r9WLQABfqw== - dependencies: - "@types/wait-on" "^5.2.0" - chalk "^4.1.0" - cwd "^0.10.0" - exit "^0.1.2" - find-process "^1.4.4" - prompts "^2.4.1" - signal-exit "^3.0.3" - spawnd "^5.0.0" - tree-kill "^1.2.2" - wait-on "^5.3.0" - jest-regex-util@^27.5.1: version "27.5.1" resolved "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-27.5.1.tgz" @@ -11037,17 +10770,6 @@ jju@~1.4.0: resolved 
"https://registry.yarnpkg.com/jju/-/jju-1.4.0.tgz#a3abe2718af241a2b2904f84a625970f389ae32a" integrity sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA== -joi@^17.3.0: - version "17.9.2" - resolved "https://registry.npmjs.org/joi/-/joi-17.9.2.tgz" - integrity sha512-Itk/r+V4Dx0V3c7RLFdRh12IOjySm2/WGPMubBT92cQvRfYZhPM2W0hZlctjj72iES8jsRCwp7S/cRmWBnJ4nw== - dependencies: - "@hapi/hoek" "^9.0.0" - "@hapi/topo" "^5.0.0" - "@sideway/address" "^4.1.3" - "@sideway/formula" "^3.0.1" - "@sideway/pinpoint" "^2.0.0" - js-base64@^3.7.2: version "3.7.5" resolved "https://registry.npmjs.org/js-base64/-/js-base64-3.7.5.tgz" @@ -11475,11 +11197,6 @@ lodash.debounce@^4.0.8: resolved "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz" integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow== -lodash.flattendeep@^4.4.0: - version "4.4.0" - resolved "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz" - integrity sha512-uHaJFihxmJcEX3kT4I23ABqKKalJ/zDrDg0lsFtc1h+3uw49SIJ5beyhx5ExVRti3AvKoOJngIj7xz3oylPdWQ== - lodash.get@^4.4.2: version "4.4.2" resolved "https://registry.yarnpkg.com/lodash.get/-/lodash.get-4.4.2.tgz#2d177f652fa31e939b4438d5341499dfa3825e99" @@ -11614,7 +11331,7 @@ make-dir@^2.1.0: pify "^4.0.1" semver "^5.6.0" -make-dir@^3.0.0, make-dir@^3.0.2: +make-dir@^3.0.0: version "3.1.0" resolved "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz" integrity sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw== @@ -12456,7 +12173,7 @@ minimist-options@4.1.0: is-plain-obj "^1.1.0" kind-of "^6.0.3" -minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6: +minimist@^1.2.0, minimist@^1.2.6: version "1.2.8" resolved "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz" integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== @@ -12691,13 +12408,6 @@ node-int64@^0.4.0: resolved "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz" integrity sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw== -node-preload@^0.2.1: - version "0.2.1" - resolved "https://registry.npmjs.org/node-preload/-/node-preload-0.2.1.tgz" - integrity sha512-RM5oyBy45cLEoHqCeh+MNuFAxO0vTFBLskvQbOKnEE7YTTSN4tbN8QWDIPQ6L+WvKsB/qLEGpYe2ZZ9d4W9OIQ== - dependencies: - process-on-spawn "^1.0.0" - node-releases@^2.0.13: version "2.0.13" resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.13.tgz#d5ed1627c23e3461e819b02e57b75e4899b1c81d" @@ -12787,39 +12497,6 @@ nwsapi@^2.2.0: resolved "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.4.tgz" integrity sha512-NHj4rzRo0tQdijE9ZqAx6kYDcoRwYwSYzCA8MY3JzfxlrvEU0jhnhJT9BhqhJs7I/dKcrDm6TyulaRqZPIhN5g== -nyc@^15.1.0: - version "15.1.0" - resolved "https://registry.npmjs.org/nyc/-/nyc-15.1.0.tgz" - integrity sha512-jMW04n9SxKdKi1ZMGhvUTHBN0EICCRkHemEoE5jm6mTYcqcdas0ATzgUgejlQUHMvpnOZqGB5Xxsv9KxJW1j8A== - dependencies: - "@istanbuljs/load-nyc-config" "^1.0.0" - "@istanbuljs/schema" "^0.1.2" - caching-transform "^4.0.0" - convert-source-map "^1.7.0" - decamelize "^1.2.0" - find-cache-dir "^3.2.0" - find-up "^4.1.0" - foreground-child "^2.0.0" - get-package-type "^0.1.0" - glob "^7.1.6" - istanbul-lib-coverage "^3.0.0" - istanbul-lib-hook "^3.0.0" - istanbul-lib-instrument "^4.0.0" - istanbul-lib-processinfo "^2.0.2" - istanbul-lib-report "^3.0.0" - istanbul-lib-source-maps "^4.0.0" - istanbul-reports "^3.0.2" 
- make-dir "^3.0.0" - node-preload "^0.2.1" - p-map "^3.0.0" - process-on-spawn "^1.0.0" - resolve-from "^5.0.0" - rimraf "^3.0.0" - signal-exit "^3.0.2" - spawn-wrap "^2.0.0" - test-exclude "^6.0.0" - yargs "^15.0.2" - object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: version "4.1.1" resolved "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz" @@ -13013,11 +12690,6 @@ ora@^5.4.1: strip-ansi "^6.0.0" wcwidth "^1.0.1" -os-homedir@^1.0.1: - version "1.0.2" - resolved "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz" - integrity sha512-B5JU3cabzk8c67mRRd3ECmROafjYMXbuzlwtqdM8IbS8ktlTix8aFGb2bAGKrSRIlnfKwovGUUr72JUPyOb6kQ== - os-tmpdir@~1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" @@ -13080,13 +12752,6 @@ p-locate@^6.0.0: dependencies: p-limit "^4.0.0" -p-map@^3.0.0: - version "3.0.0" - resolved "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz" - integrity sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ== - dependencies: - aggregate-error "^3.0.0" - p-map@^4.0.0: version "4.0.0" resolved "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz" @@ -13107,16 +12772,6 @@ p-try@^2.0.0: resolved "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz" integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== -package-hash@^4.0.0: - version "4.0.0" - resolved "https://registry.npmjs.org/package-hash/-/package-hash-4.0.0.tgz" - integrity sha512-whdkPIooSu/bASggZ96BWVvZTRMOFxnyUG5PnTSGKoJE2gd5mbVNmR2Nj20QFzxYYgAXpoqC+AiXzl+UMRh7zQ== - dependencies: - graceful-fs "^4.1.15" - hasha "^5.0.0" - lodash.flattendeep "^4.4.0" - release-zalgo "^1.0.0" - pako@^2.0.4: version "2.1.0" resolved "https://registry.npmjs.org/pako/-/pako-2.1.0.tgz" @@ -13229,11 +12884,6 @@ parse-numeric-range@^1.3.0: resolved "https://registry.yarnpkg.com/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz#7c63b61190d61e4d53a1197f0c83c47bb670ffa3" integrity sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ== -parse-passwd@^1.0.0: - version "1.0.0" - resolved "https://registry.npmjs.org/parse-passwd/-/parse-passwd-1.0.0.tgz" - integrity sha512-1Y1A//QUXEZK7YKz+rD9WydcE1+EuPr6ZBgKecAB8tmoW6UFv0NREVJe1p+jRxtThkcbbKkfwIbWJe/IeE6m2Q== - parse5-htmlparser2-tree-adapter@^6.0.1: version "6.0.1" resolved "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz" @@ -13410,7 +13060,7 @@ pirates@^4.0.4: resolved "https://registry.npmjs.org/pirates/-/pirates-4.0.5.tgz" integrity sha512-8V9+HQPupnaXMA23c5hvl69zXvTwTzyAYasnkb0Tts4XvO4CliqONMOnvlq26rkhLC3nWDFBJf73LU1e1VZLaQ== -pkg-dir@^4.1.0, pkg-dir@^4.2.0: +pkg-dir@^4.2.0: version "4.2.0" resolved "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz" integrity sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ== @@ -13424,7 +13074,7 @@ pkg-dir@^7.0.0: dependencies: find-up "^6.3.0" -playwright-core@1.33.0, playwright-core@>=1.2.0: +playwright-core@1.33.0: version "1.33.0" resolved "https://registry.npmjs.org/playwright-core/-/playwright-core-1.33.0.tgz" integrity sha512-aizyPE1Cj62vAECdph1iaMILpT0WUDCq3E6rW6I+dleSbBoGbktvJtzS6VHkZ4DKNEOG9qJpiom/ZxO+S15LAw== @@ -13807,13 +13457,6 @@ process-nextick-args@~2.0.0: resolved "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz" integrity 
sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag== -process-on-spawn@^1.0.0: - version "1.0.0" - resolved "https://registry.npmjs.org/process-on-spawn/-/process-on-spawn-1.0.0.tgz" - integrity sha512-1WsPDsUSMmZH5LeMLegqkPDrsGgsWwk1Exipy2hvB0o/F0ASzbpIctSCcZIK1ykJvtTJULEH+20WOFjMvGnCTg== - dependencies: - fromentries "^1.2.0" - process@^0.11.10: version "0.11.10" resolved "https://registry.npmjs.org/process/-/process-0.11.10.tgz" @@ -13829,7 +13472,7 @@ promise-polyfill@^3.1.0: resolved "https://registry.npmjs.org/promise-polyfill/-/promise-polyfill-3.1.0.tgz" integrity sha512-t20OwHJ4ZOUj5fV+qms67oczphAVkRC6Rrjcrne+V1FJkQMym7n69xJmYyXHulm9OUQ0Ie5KSzg0QhOYgaxy+w== -prompts@^2.0.1, prompts@^2.4.1: +prompts@^2.0.1: version "2.4.2" resolved "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz" integrity sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q== @@ -15010,13 +14653,6 @@ relateurl@^0.2.7: resolved "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz" integrity sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog== -release-zalgo@^1.0.0: - version "1.0.0" - resolved "https://registry.npmjs.org/release-zalgo/-/release-zalgo-1.0.0.tgz" - integrity sha512-gUAyHVHPPC5wdqX/LG4LWtRYtgjxyX78oanFNTMMyFEfOqdC54s3eE82imuWKbOeqYht2CrNf64Qb8vgmmtZGA== - dependencies: - es6-error "^4.0.1" - remark-gfm@~3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/remark-gfm/-/remark-gfm-3.0.1.tgz#0b180f095e3036545e9dddac0e8df3fa5cfee54f" @@ -15080,11 +14716,6 @@ require-from-string@^2.0.2: resolved "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz" integrity sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw== -require-main-filename@^2.0.0: - version "2.0.0" - resolved "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz" - integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg== - requires-port@^1.0.0: version "1.0.0" resolved "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz" @@ -15107,14 +14738,6 @@ resolve-cwd@^3.0.0: dependencies: resolve-from "^5.0.0" -resolve-dir@^0.1.0: - version "0.1.1" - resolved "https://registry.npmjs.org/resolve-dir/-/resolve-dir-0.1.1.tgz" - integrity sha512-QxMPqI6le2u0dCLyiGzgy92kjkkL6zO0XyvHzjdTNH3zM6e5Hz3BwG6+aEyNgiQ5Xz6PwTwgQEj3U50dByPKIA== - dependencies: - expand-tilde "^1.2.2" - global-modules "^0.2.3" - resolve-from@5.0.0, resolve-from@^5.0.0: version "5.0.0" resolved "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz" @@ -15250,13 +14873,6 @@ rxjs@7.8.0: dependencies: tslib "^2.1.0" -rxjs@^6.6.3: - version "6.6.7" - resolved "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz" - integrity sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ== - dependencies: - tslib "^1.9.0" - rxjs@^7.5.5: version "7.8.1" resolved "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz" @@ -15483,11 +15099,6 @@ serve-static@1.15.0: parseurl "~1.3.3" send "0.18.0" -set-blocking@^2.0.0: - version "2.0.0" - resolved "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz" - integrity sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw== - set-cookie-parser@^2.4.6: version "2.6.0" resolved 
"https://registry.yarnpkg.com/set-cookie-parser/-/set-cookie-parser-2.6.0.tgz#131921e50f62ff1a66a461d7d62d7b21d5d15a51" @@ -15722,28 +15333,6 @@ space-separated-tokens@^2.0.0: resolved "https://registry.yarnpkg.com/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz#1ecd9d2350a3844572c3f4a312bceb018348859f" integrity sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q== -spawn-wrap@^2.0.0: - version "2.0.0" - resolved "https://registry.npmjs.org/spawn-wrap/-/spawn-wrap-2.0.0.tgz" - integrity sha512-EeajNjfN9zMnULLwhZZQU3GWBoFNkbngTUPfaawT4RkMiviTxcX0qfhVbGey39mfctfDHkWtuecgQ8NJcyQWHg== - dependencies: - foreground-child "^2.0.0" - is-windows "^1.0.2" - make-dir "^3.0.0" - rimraf "^3.0.0" - signal-exit "^3.0.2" - which "^2.0.1" - -spawnd@^5.0.0: - version "5.0.0" - resolved "https://registry.npmjs.org/spawnd/-/spawnd-5.0.0.tgz" - integrity sha512-28+AJr82moMVWolQvlAIv3JcYDkjkFTEmfDc503wxrF5l2rQ3dFz6DpbXp3kD4zmgGGldfM4xM4v1sFj/ZaIOA== - dependencies: - exit "^0.1.2" - signal-exit "^3.0.3" - tree-kill "^1.2.2" - wait-port "^0.2.9" - spdx-correct@^3.0.0: version "3.2.0" resolved "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz" @@ -16450,11 +16039,6 @@ tr46@~0.0.3: resolved "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz" integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw== -tree-kill@^1.2.2: - version "1.2.2" - resolved "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz" - integrity sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A== - trim-lines@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/trim-lines/-/trim-lines-3.0.1.tgz#d802e332a07df861c48802c04321017b1bd87338" @@ -16541,7 +16125,7 @@ tslib@2.5.0, tslib@^2.0.3, tslib@^2.1.0, tslib@^2.3.0: resolved "https://registry.npmjs.org/tslib/-/tslib-2.5.0.tgz" integrity sha512-336iVw3rtn2BUK7ORdIAHTyxHGRIHVReokCR3XjbckJMK7ms8FysBfhLR8IXnAgy7T0PTPNBWKiH514FOW/WSg== -tslib@^1.8.1, tslib@^1.9.0: +tslib@^1.8.1: version "1.14.1" resolved "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz" integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== @@ -16614,7 +16198,7 @@ type-fest@^0.6.0: resolved "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz" integrity sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg== -type-fest@^0.8.0, type-fest@^0.8.1: +type-fest@^0.8.1: version "0.8.1" resolved "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz" integrity sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA== @@ -17161,26 +16745,6 @@ w3c-xmlserializer@^2.0.0: dependencies: xml-name-validator "^3.0.0" -wait-on@^5.3.0: - version "5.3.0" - resolved "https://registry.npmjs.org/wait-on/-/wait-on-5.3.0.tgz" - integrity sha512-DwrHrnTK+/0QFaB9a8Ol5Lna3k7WvUR4jzSKmz0YaPBpuN2sACyiPVKVfj6ejnjcajAcvn3wlbTyMIn9AZouOg== - dependencies: - axios "^0.21.1" - joi "^17.3.0" - lodash "^4.17.21" - minimist "^1.2.5" - rxjs "^6.6.3" - -wait-port@^0.2.9: - version "0.2.14" - resolved "https://registry.npmjs.org/wait-port/-/wait-port-0.2.14.tgz" - integrity sha512-kIzjWcr6ykl7WFbZd0TMae8xovwqcqbx6FM9l+7agOgUByhzdjfzZBPK2CPufldTOMxbUivss//Sh9MFawmPRQ== - dependencies: - chalk "^2.4.2" - commander "^3.0.2" - debug "^4.1.1" - walker@^1.0.7, walker@^1.0.8: version "1.0.8" resolved 
"https://registry.yarnpkg.com/walker/-/walker-1.0.8.tgz#bd498db477afe573dc04185f011d3ab8a8d7653f" @@ -17549,11 +17113,6 @@ which-collection@^1.0.1: is-weakmap "^2.0.1" is-weakset "^2.0.1" -which-module@^2.0.0: - version "2.0.1" - resolved "https://registry.npmjs.org/which-module/-/which-module-2.0.1.tgz" - integrity sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ== - which-typed-array@^1.1.10, which-typed-array@^1.1.11, which-typed-array@^1.1.2: version "1.1.11" resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.11.tgz#99d691f23c72aab6768680805a271b69761ed61a" @@ -17577,7 +17136,7 @@ which-typed-array@^1.1.9: has-tostringtag "^1.0.0" is-typed-array "^1.1.10" -which@^1.2.12, which@^1.2.9: +which@^1.2.9: version "1.3.1" resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== @@ -17731,11 +17290,6 @@ xtend@^4.0.0: resolved "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz" integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== -y18n@^4.0.0: - version "4.0.3" - resolved "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz" - integrity sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ== - y18n@^5.0.5: version "5.0.8" resolved "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz" @@ -17761,36 +17315,11 @@ yargs-parser@20.x, yargs-parser@^20.2.2, yargs-parser@^20.2.3: resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz" integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w== -yargs-parser@^18.1.2: - version "18.1.3" - resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz" - integrity sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ== - dependencies: - camelcase "^5.0.0" - decamelize "^1.2.0" - yargs-parser@^21.1.1: version "21.1.1" resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz" integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== -yargs@^15.0.2: - version "15.4.1" - resolved "https://registry.npmjs.org/yargs/-/yargs-15.4.1.tgz" - integrity sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A== - dependencies: - cliui "^6.0.0" - decamelize "^1.2.0" - find-up "^4.1.0" - get-caller-file "^2.0.1" - require-directory "^2.1.1" - require-main-filename "^2.0.0" - set-blocking "^2.0.0" - string-width "^4.2.0" - which-module "^2.0.0" - y18n "^4.0.0" - yargs-parser "^18.1.2" - yargs@^16.2.0: version "16.2.0" resolved "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz" diff --git a/pkg/query-service/app/clickhouseReader/options.go b/pkg/query-service/app/clickhouseReader/options.go index 538cef33e5..695bef8570 100644 --- a/pkg/query-service/app/clickhouseReader/options.go +++ b/pkg/query-service/app/clickhouseReader/options.go @@ -40,6 +40,11 @@ const ( defaultWriteBatchDelay time.Duration = 5 * time.Second defaultWriteBatchSize int = 10000 defaultEncoding Encoding = EncodingJSON + + defaultLogsLocalTableV2 string = "logs_v2" + defaultLogsTableV2 string = "distributed_logs_v2" + defaultLogsResourceLocalTableV2 string = "logs_v2_resource" + defaultLogsResourceTableV2 string = "distributed_logs_v2_resource" ) // NamespaceConfig is 
Clickhouse's internal configuration data @@ -72,6 +77,11 @@ type namespaceConfig struct { WriteBatchSize int Encoding Encoding Connector Connector + + LogsLocalTableV2 string + LogsTableV2 string + LogsResourceLocalTableV2 string + LogsResourceTableV2 string } // Connector defines how to connect to the database @@ -159,6 +169,11 @@ func NewOptions( WriteBatchSize: defaultWriteBatchSize, Encoding: defaultEncoding, Connector: defaultConnector, + + LogsTableV2: defaultLogsTableV2, + LogsLocalTableV2: defaultLogsLocalTableV2, + LogsResourceTableV2: defaultLogsResourceTableV2, + LogsResourceLocalTableV2: defaultLogsResourceLocalTableV2, }, others: make(map[string]*namespaceConfig, len(otherNamespaces)), } diff --git a/pkg/query-service/app/clickhouseReader/query_progress/inmemory_tracker.go b/pkg/query-service/app/clickhouseReader/query_progress/inmemory_tracker.go index d29a61cb5c..fcefe4cd7c 100644 --- a/pkg/query-service/app/clickhouseReader/query_progress/inmemory_tracker.go +++ b/pkg/query-service/app/clickhouseReader/query_progress/inmemory_tracker.go @@ -7,7 +7,6 @@ import ( "github.com/ClickHouse/clickhouse-go/v2" "github.com/google/uuid" "go.signoz.io/signoz/pkg/query-service/model" - v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.uber.org/zap" "golang.org/x/exp/maps" ) @@ -52,7 +51,7 @@ func (tracker *inMemoryQueryProgressTracker) ReportQueryProgress( func (tracker *inMemoryQueryProgressTracker) SubscribeToQueryProgress( queryId string, -) (<-chan v3.QueryProgress, func(), *model.ApiError) { +) (<-chan model.QueryProgress, func(), *model.ApiError) { queryTracker, err := tracker.getQueryTracker(queryId) if err != nil { return nil, nil, err @@ -97,7 +96,7 @@ type queryTracker struct { queryId string isFinished bool - progress *v3.QueryProgress + progress *model.QueryProgress subscriptions map[string]*queryProgressSubscription lock sync.Mutex @@ -124,7 +123,7 @@ func (qt *queryTracker) handleProgressUpdate(p *clickhouse.Progress) { if qt.progress == nil { // This is the first update - qt.progress = &v3.QueryProgress{} + qt.progress = &model.QueryProgress{} } updateQueryProgress(qt.progress, p) @@ -135,7 +134,7 @@ func (qt *queryTracker) handleProgressUpdate(p *clickhouse.Progress) { } func (qt *queryTracker) subscribe() ( - <-chan v3.QueryProgress, func(), *model.ApiError, + <-chan model.QueryProgress, func(), *model.ApiError, ) { qt.lock.Lock() defer qt.lock.Unlock() @@ -200,20 +199,20 @@ func (qt *queryTracker) onFinished() { } type queryProgressSubscription struct { - ch chan v3.QueryProgress + ch chan model.QueryProgress isClosed bool lock sync.Mutex } func newQueryProgressSubscription() *queryProgressSubscription { - ch := make(chan v3.QueryProgress, 1000) + ch := make(chan model.QueryProgress, 1000) return &queryProgressSubscription{ ch: ch, } } // Must not block or panic in any scenario -func (ch *queryProgressSubscription) send(progress v3.QueryProgress) { +func (ch *queryProgressSubscription) send(progress model.QueryProgress) { ch.lock.Lock() defer ch.lock.Unlock() @@ -248,7 +247,7 @@ func (ch *queryProgressSubscription) close() { } } -func updateQueryProgress(qp *v3.QueryProgress, chProgress *clickhouse.Progress) { +func updateQueryProgress(qp *model.QueryProgress, chProgress *clickhouse.Progress) { qp.ReadRows += chProgress.Rows qp.ReadBytes += chProgress.Bytes qp.ElapsedMs += uint64(chProgress.Elapsed.Milliseconds()) diff --git a/pkg/query-service/app/clickhouseReader/query_progress/tracker.go b/pkg/query-service/app/clickhouseReader/query_progress/tracker.go index
d424c99c57..c783dfbd5b 100644 --- a/pkg/query-service/app/clickhouseReader/query_progress/tracker.go +++ b/pkg/query-service/app/clickhouseReader/query_progress/tracker.go @@ -3,7 +3,6 @@ package queryprogress import ( "github.com/ClickHouse/clickhouse-go/v2" "go.signoz.io/signoz/pkg/query-service/model" - v3 "go.signoz.io/signoz/pkg/query-service/model/v3" ) type QueryProgressTracker interface { @@ -19,7 +18,7 @@ type QueryProgressTracker interface { // The returned channel will produce `QueryProgress` instances representing // the latest state of query progress stats. Also returns a function that // can be called to unsubscribe before the query finishes, if needed. - SubscribeToQueryProgress(queryId string) (ch <-chan v3.QueryProgress, unsubscribe func(), err *model.ApiError) + SubscribeToQueryProgress(queryId string) (ch <-chan model.QueryProgress, unsubscribe func(), err *model.ApiError) } func NewQueryProgressTracker() QueryProgressTracker { diff --git a/pkg/query-service/app/clickhouseReader/query_progress/tracker_test.go b/pkg/query-service/app/clickhouseReader/query_progress/tracker_test.go index 4babe47f82..9a51cd0fe6 100644 --- a/pkg/query-service/app/clickhouseReader/query_progress/tracker_test.go +++ b/pkg/query-service/app/clickhouseReader/query_progress/tracker_test.go @@ -7,7 +7,6 @@ import ( "github.com/ClickHouse/clickhouse-go/v2" "github.com/stretchr/testify/require" "go.signoz.io/signoz/pkg/query-service/model" - v3 "go.signoz.io/signoz/pkg/query-service/model/v3" ) func TestQueryProgressTracking(t *testing.T) { @@ -45,7 +44,7 @@ func TestQueryProgressTracking(t *testing.T) { require.NotNil(ch) require.NotNil(unsubscribe) - expectedProgress := v3.QueryProgress{} + expectedProgress := model.QueryProgress{} updateQueryProgress(&expectedProgress, testProgress1) require.Equal(expectedProgress.ReadRows, testProgress1.Rows) select { diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index b3ef773da0..bb2a84b487 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -1,15 +1,12 @@ package clickhouseReader import ( - "bytes" "context" "database/sql" "encoding/json" "fmt" - "io" "math" "math/rand" - "net/http" "os" "reflect" "regexp" @@ -42,14 +39,11 @@ import ( "go.uber.org/zap" queryprogress "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader/query_progress" - "go.signoz.io/signoz/pkg/query-service/app/dashboards" - "go.signoz.io/signoz/pkg/query-service/app/explorer" "go.signoz.io/signoz/pkg/query-service/app/logs" "go.signoz.io/signoz/pkg/query-service/app/services" "go.signoz.io/signoz/pkg/query-service/auth" "go.signoz.io/signoz/pkg/query-service/common" "go.signoz.io/signoz/pkg/query-service/constants" - "go.signoz.io/signoz/pkg/query-service/dao" chErrors "go.signoz.io/signoz/pkg/query-service/errors" am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager" "go.signoz.io/signoz/pkg/query-service/interfaces" @@ -89,6 +83,7 @@ const ( maxProgressiveSteps = 4 charset = "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + NANOSECOND = 1000000000 ) var ( @@ -125,6 +120,11 @@ type ClickHouseReader struct { fanoutStorage *storage.Storage queryProgressTracker queryprogress.QueryProgressTracker + logsTableV2 string + logsLocalTableV2 string + logsResourceTableV2 string + logsResourceLocalTableV2 string + promConfigFile string promConfig *config.Config alertManager am.Manager @@ -133,7 +133,9 @@ type ClickHouseReader 
struct { liveTailRefreshSeconds int cluster string - useLogsNewSchema bool + useLogsNewSchema bool + logsTableName string + logsLocalTableName string } // NewTraceReader returns a TraceReader for the database @@ -168,7 +170,7 @@ func NewReaderFromClickhouseConnection( cluster string, useLogsNewSchema bool, ) *ClickHouseReader { - alertManager, err := am.New("") + alertManager, err := am.New() if err != nil { zap.L().Error("failed to initialize alert manager", zap.Error(err)) zap.L().Error("check if the alert manager URL is correctly set and valid") @@ -197,6 +199,13 @@ func NewReaderFromClickhouseConnection( }, } + logsTableName := options.primary.LogsTable + logsLocalTableName := options.primary.LogsLocalTable + if useLogsNewSchema { + logsTableName = options.primary.LogsTableV2 + logsLocalTableName = options.primary.LogsLocalTableV2 + } + return &ClickHouseReader{ db: wrap, localDB: localDB, @@ -223,7 +232,15 @@ func NewReaderFromClickhouseConnection( featureFlags: featureFlag, cluster: cluster, queryProgressTracker: queryprogress.NewQueryProgressTracker(), - useLogsNewSchema: useLogsNewSchema, + + useLogsNewSchema: useLogsNewSchema, + + logsTableV2: options.primary.LogsTableV2, + logsLocalTableV2: options.primary.LogsLocalTableV2, + logsResourceTableV2: options.primary.LogsResourceTableV2, + logsResourceLocalTableV2: options.primary.LogsResourceLocalTableV2, + logsTableName: logsTableName, + logsLocalTableName: logsLocalTableName, } } @@ -394,267 +411,6 @@ func (r *ClickHouseReader) GetConn() clickhouse.Conn { return r.db } -func (r *ClickHouseReader) LoadChannel(channel *model.ChannelItem) *model.ApiError { - - receiver := &am.Receiver{} - if err := json.Unmarshal([]byte(channel.Data), receiver); err != nil { // Parse []byte to go struct pointer - return &model.ApiError{Typ: model.ErrorBadData, Err: err} - } - - response, err := http.Post(constants.GetAlertManagerApiPrefix()+"v1/receivers", "application/json", bytes.NewBuffer([]byte(channel.Data))) - - if err != nil { - zap.L().Error("Error in getting response of API call to alertmanager/v1/receivers", zap.Error(err)) - return &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - if response.StatusCode > 299 { - responseData, _ := io.ReadAll(response.Body) - - err := fmt.Errorf("error in getting 2xx response in API call to alertmanager/v1/receivers") - zap.L().Error("Error in getting 2xx response in API call to alertmanager/v1/receivers", zap.String("Status", response.Status), zap.String("Data", string(responseData))) - - return &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - - return nil -} - -func (r *ClickHouseReader) GetChannel(id string) (*model.ChannelItem, *model.ApiError) { - - idInt, _ := strconv.Atoi(id) - channel := model.ChannelItem{} - - query := "SELECT id, created_at, updated_at, name, type, data data FROM notification_channels WHERE id=? 
" - - stmt, err := r.localDB.Preparex(query) - - if err != nil { - zap.L().Error("Error in preparing sql query for GetChannel", zap.Error(err)) - return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - - err = stmt.Get(&channel, idInt) - - if err != nil { - zap.L().Error("Error in getting channel with id", zap.Int("id", idInt), zap.Error(err)) - return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - - return &channel, nil - -} - -func (r *ClickHouseReader) DeleteChannel(id string) *model.ApiError { - - idInt, _ := strconv.Atoi(id) - - channelToDelete, apiErrorObj := r.GetChannel(id) - - if apiErrorObj != nil { - return apiErrorObj - } - - tx, err := r.localDB.Begin() - if err != nil { - return &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - - { - stmt, err := tx.Prepare(`DELETE FROM notification_channels WHERE id=$1;`) - if err != nil { - zap.L().Error("Error in preparing statement for INSERT to notification_channels", zap.Error(err)) - tx.Rollback() - return &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - defer stmt.Close() - - if _, err := stmt.Exec(idInt); err != nil { - zap.L().Error("Error in Executing prepared statement for INSERT to notification_channels", zap.Error(err)) - tx.Rollback() // return an error too, we may want to wrap them - return &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - } - - apiError := r.alertManager.DeleteRoute(channelToDelete.Name) - if apiError != nil { - tx.Rollback() - return apiError - } - - err = tx.Commit() - if err != nil { - zap.L().Error("Error in committing transaction for DELETE command to notification_channels", zap.Error(err)) - return &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - - return nil - -} - -func (r *ClickHouseReader) GetChannels() (*[]model.ChannelItem, *model.ApiError) { - - channels := []model.ChannelItem{} - - query := "SELECT id, created_at, updated_at, name, type, data data FROM notification_channels" - - err := r.localDB.Select(&channels, query) - - zap.L().Info(query) - - if err != nil { - zap.L().Error("Error in processing sql query", zap.Error(err)) - return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - - return &channels, nil - -} - -func getChannelType(receiver *am.Receiver) string { - - if receiver.EmailConfigs != nil { - return "email" - } - if receiver.OpsGenieConfigs != nil { - return "opsgenie" - } - if receiver.PagerdutyConfigs != nil { - return "pagerduty" - } - if receiver.PushoverConfigs != nil { - return "pushover" - } - if receiver.SNSConfigs != nil { - return "sns" - } - if receiver.SlackConfigs != nil { - return "slack" - } - if receiver.VictorOpsConfigs != nil { - return "victorops" - } - if receiver.WebhookConfigs != nil { - return "webhook" - } - if receiver.WechatConfigs != nil { - return "wechat" - } - if receiver.MSTeamsConfigs != nil { - return "msteams" - } - return "" -} - -func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Receiver, *model.ApiError) { - - idInt, _ := strconv.Atoi(id) - - channel, apiErrObj := r.GetChannel(id) - - if apiErrObj != nil { - return nil, apiErrObj - } - if channel.Name != receiver.Name { - return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("channel name cannot be changed")} - } - - tx, err := r.localDB.Begin() - if err != nil { - return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - - channel_type := getChannelType(receiver) - - // check if channel type is supported in the current user plan - if err := 
r.featureFlags.CheckFeature(fmt.Sprintf("ALERT_CHANNEL_%s", strings.ToUpper(channel_type))); err != nil { - zap.L().Warn("an unsupported feature was blocked", zap.Error(err)) - return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("unsupported feature. please upgrade your plan to access this feature")} - } - - receiverString, _ := json.Marshal(receiver) - - { - stmt, err := tx.Prepare(`UPDATE notification_channels SET updated_at=$1, type=$2, data=$3 WHERE id=$4;`) - - if err != nil { - zap.L().Error("Error in preparing statement for UPDATE to notification_channels", zap.Error(err)) - tx.Rollback() - return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - defer stmt.Close() - - if _, err := stmt.Exec(time.Now(), channel_type, string(receiverString), idInt); err != nil { - zap.L().Error("Error in Executing prepared statement for UPDATE to notification_channels", zap.Error(err)) - tx.Rollback() // return an error too, we may want to wrap them - return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - } - - apiError := r.alertManager.EditRoute(receiver) - if apiError != nil { - tx.Rollback() - return nil, apiError - } - - err = tx.Commit() - if err != nil { - zap.L().Error("Error in committing transaction for INSERT to notification_channels", zap.Error(err)) - return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - - return receiver, nil - -} - -func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, *model.ApiError) { - - channel_type := getChannelType(receiver) - - // check if channel type is supported in the current user plan - if err := r.featureFlags.CheckFeature(fmt.Sprintf("ALERT_CHANNEL_%s", strings.ToUpper(channel_type))); err != nil { - zap.L().Warn("an unsupported feature was blocked", zap.Error(err)) - return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("unsupported feature. 
please upgrade your plan to access this feature")} - } - - receiverString, _ := json.Marshal(receiver) - - tx, err := r.localDB.Begin() - if err != nil { - return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - - { - stmt, err := tx.Prepare(`INSERT INTO notification_channels (created_at, updated_at, name, type, data) VALUES($1,$2,$3,$4,$5);`) - if err != nil { - zap.L().Error("Error in preparing statement for INSERT to notification_channels", zap.Error(err)) - tx.Rollback() - return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - defer stmt.Close() - - if _, err := stmt.Exec(time.Now(), time.Now(), receiver.Name, channel_type, string(receiverString)); err != nil { - zap.L().Error("Error in Executing prepared statement for INSERT to notification_channels", zap.Error(err)) - tx.Rollback() // return an error too, we may want to wrap them - return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - } - - apiError := r.alertManager.AddRoute(receiver) - if apiError != nil { - tx.Rollback() - return nil, apiError - } - - err = tx.Commit() - if err != nil { - zap.L().Error("Error in committing transaction for INSERT to notification_channels", zap.Error(err)) - return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} - } - - return receiver, nil - -} - func (r *ClickHouseReader) GetInstantQueryMetricsResult(ctx context.Context, queryParams *model.InstantQueryMetricsParams) (*promql.Result, *stats.QueryStats, *model.ApiError) { qry, err := r.queryEngine.NewInstantQuery(ctx, r.remoteStorage, nil, queryParams.Query, queryParams.Time) if err != nil { @@ -972,7 +728,7 @@ func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams * return &serviceOverviewItems, nil } -func buildFilterArrayQuery(ctx context.Context, excludeMap map[string]struct{}, params []string, filter string, query *string, args []interface{}) []interface{} { +func buildFilterArrayQuery(_ context.Context, excludeMap map[string]struct{}, params []string, filter string, query *string, args []interface{}) []interface{} { for i, e := range params { filterKey := filter + String(5) if i == 0 && i == len(params)-1 { @@ -1481,7 +1237,7 @@ func String(length int) string { return StringWithCharset(length, charset) } -func buildQueryWithTagParams(ctx context.Context, tags []model.TagQuery) (string, []interface{}, *model.ApiError) { +func buildQueryWithTagParams(_ context.Context, tags []model.TagQuery) (string, []interface{}, *model.ApiError) { query := "" var args []interface{} for _, item := range tags { @@ -1691,7 +1447,7 @@ func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model return &tagFiltersResult, nil } -func excludeTags(ctx context.Context, tags []string) []string { +func excludeTags(_ context.Context, tags []string) []string { excludedTagsMap := map[string]bool{ "http.code": true, "http.route": true, @@ -2445,7 +2201,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, return &model.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil } -func (r *ClickHouseReader) deleteTtlTransactions(ctx context.Context, numberOfTransactionsStore int) { +func (r *ClickHouseReader) deleteTtlTransactions(_ context.Context, numberOfTransactionsStore int) { _, err := r.localDB.Exec("DELETE FROM ttl_status WHERE transaction_id NOT IN (SELECT distinct transaction_id FROM ttl_status ORDER BY created_at DESC LIMIT ?)", numberOfTransactionsStore) if err != nil { zap.L().Error("Error in processing ttl_status delete sql query", 
zap.Error(err)) @@ -2453,7 +2209,7 @@ func (r *ClickHouseReader) deleteTtlTransactions(ctx context.Context, numberOfTr } // checkTTLStatusItem checks if ttl_status table has an entry for the given table name -func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, tableName string) (model.TTLStatusItem, *model.ApiError) { +func (r *ClickHouseReader) checkTTLStatusItem(_ context.Context, tableName string) (model.TTLStatusItem, *model.ApiError) { statusItem := []model.TTLStatusItem{} query := `SELECT id, status, ttl, cold_storage_ttl FROM ttl_status WHERE table_name = ? ORDER BY created_at DESC` @@ -3031,122 +2787,6 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode } } -func (r *ClickHouseReader) GetMetricResultEE(ctx context.Context, query string) ([]*model.Series, string, error) { - zap.L().Error("GetMetricResultEE is not implemented for opensource version") - return nil, "", fmt.Errorf("GetMetricResultEE is not implemented for opensource version") -} - -// GetMetricResult runs the query and returns list of time series -func (r *ClickHouseReader) GetMetricResult(ctx context.Context, query string) ([]*model.Series, error) { - - defer utils.Elapsed("GetMetricResult", nil)() - - zap.L().Info("Executing metric result query: ", zap.String("query", query)) - - rows, err := r.db.Query(ctx, query) - - if err != nil { - zap.L().Error("Error in processing query", zap.Error(err)) - return nil, err - } - - var ( - columnTypes = rows.ColumnTypes() - columnNames = rows.Columns() - vars = make([]interface{}, len(columnTypes)) - ) - for i := range columnTypes { - vars[i] = reflect.New(columnTypes[i].ScanType()).Interface() - } - // when group by is applied, each combination of cartesian product - // of attributes is separate series. each item in metricPointsMap - // represent a unique series. - metricPointsMap := make(map[string][]model.MetricPoint) - // attribute key-value pairs for each group selection - attributesMap := make(map[string]map[string]string) - - defer rows.Close() - for rows.Next() { - if err := rows.Scan(vars...); err != nil { - return nil, err - } - var groupBy []string - var metricPoint model.MetricPoint - groupAttributes := make(map[string]string) - // Assuming that the end result row contains a timestamp, value and option labels - // Label key and value are both strings. 
- for idx, v := range vars { - colName := columnNames[idx] - switch v := v.(type) { - case *string: - // special case for returning all labels - if colName == "fullLabels" { - var metric map[string]string - err := json.Unmarshal([]byte(*v), &metric) - if err != nil { - return nil, err - } - for key, val := range metric { - groupBy = append(groupBy, val) - groupAttributes[key] = val - } - } else { - groupBy = append(groupBy, *v) - groupAttributes[colName] = *v - } - case *time.Time: - metricPoint.Timestamp = v.UnixMilli() - case *float64: - metricPoint.Value = *v - case **float64: - // ch seems to return this type when column is derived from - // SELECT count(*)/ SELECT count(*) - floatVal := *v - if floatVal != nil { - metricPoint.Value = *floatVal - } - case *float32: - float32Val := float32(*v) - metricPoint.Value = float64(float32Val) - case *uint8, *uint64, *uint16, *uint32: - if _, ok := constants.ReservedColumnTargetAliases[colName]; ok { - metricPoint.Value = float64(reflect.ValueOf(v).Elem().Uint()) - } else { - groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())) - groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()) - } - case *int8, *int16, *int32, *int64: - if _, ok := constants.ReservedColumnTargetAliases[colName]; ok { - metricPoint.Value = float64(reflect.ValueOf(v).Elem().Int()) - } else { - groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())) - groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()) - } - default: - zap.L().Error("invalid var found in metric builder query result", zap.Any("v", v), zap.String("colName", colName)) - } - } - sort.Strings(groupBy) - key := strings.Join(groupBy, "") - attributesMap[key] = groupAttributes - metricPointsMap[key] = append(metricPointsMap[key], metricPoint) - } - - var seriesList []*model.Series - for key := range metricPointsMap { - points := metricPointsMap[key] - // first point in each series could be invalid since the - // aggregations are applied with point from prev series - if len(points) != 0 && len(points) > 1 { - points = points[1:] - } - attributes := attributesMap[key] - series := model.Series{Labels: attributes, Points: points} - seriesList = append(seriesList, &series) - } - return seriesList, nil -} - func (r *ClickHouseReader) GetTotalSpans(ctx context.Context) (uint64, error) { var totalSpans uint64 @@ -3340,156 +2980,6 @@ func removeUnderscoreDuplicateFields(fields []model.LogField) []model.LogField { return updatedFields } -// GetDashboardsInfo returns analytics data for dashboards -func (r *ClickHouseReader) GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error) { - dashboardsInfo := model.DashboardsInfo{} - // fetch dashboards from dashboard db - query := "SELECT data FROM dashboards" - var dashboardsData []dashboards.Dashboard - err := r.localDB.Select(&dashboardsData, query) - if err != nil { - zap.L().Error("Error in processing sql query", zap.Error(err)) - return &dashboardsInfo, err - } - totalDashboardsWithPanelAndName := 0 - var dashboardNames []string - count := 0 - for _, dashboard := range dashboardsData { - if isDashboardWithPanelAndName(dashboard.Data) { - totalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName + 1 - } - dashboardName := extractDashboardName(dashboard.Data) - if dashboardName != "" { - dashboardNames = append(dashboardNames, dashboardName) - } - dashboardInfo := countPanelsInDashboard(dashboard.Data) - dashboardsInfo.LogsBasedPanels += 
dashboardInfo.LogsBasedPanels - dashboardsInfo.TracesBasedPanels += dashboardInfo.TracesBasedPanels - dashboardsInfo.MetricBasedPanels += dashboardsInfo.MetricBasedPanels - if isDashboardWithTSV2(dashboard.Data) { - count = count + 1 - } - } - - dashboardsInfo.DashboardNames = dashboardNames - dashboardsInfo.TotalDashboards = len(dashboardsData) - dashboardsInfo.TotalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName - dashboardsInfo.QueriesWithTSV2 = count - return &dashboardsInfo, nil -} - -func isDashboardWithTSV2(data map[string]interface{}) bool { - jsonData, err := json.Marshal(data) - if err != nil { - return false - } - return strings.Contains(string(jsonData), "time_series_v2") -} - -func isDashboardWithPanelAndName(data map[string]interface{}) bool { - isDashboardName := false - isDashboardWithPanelAndName := false - if data != nil && data["title"] != nil && data["widgets"] != nil { - title, ok := data["title"].(string) - if ok && title != "Sample Title" { - isDashboardName = true - } - widgets, ok := data["widgets"] - if ok && isDashboardName { - data, ok := widgets.([]interface{}) - if ok && len(data) > 0 { - isDashboardWithPanelAndName = true - } - } - } - - return isDashboardWithPanelAndName -} - -func extractDashboardName(data map[string]interface{}) string { - - if data != nil && data["title"] != nil { - title, ok := data["title"].(string) - if ok { - return title - } - } - - return "" -} - -func countPanelsInDashboard(data map[string]interface{}) model.DashboardsInfo { - var logsPanelCount, tracesPanelCount, metricsPanelCount int - // totalPanels := 0 - if data != nil && data["widgets"] != nil { - widgets, ok := data["widgets"] - if ok { - data, ok := widgets.([]interface{}) - if ok { - for _, widget := range data { - sData, ok := widget.(map[string]interface{}) - if ok && sData["query"] != nil { - // totalPanels++ - query, ok := sData["query"].(map[string]interface{}) - if ok && query["queryType"] == "builder" && query["builder"] != nil { - builderData, ok := query["builder"].(map[string]interface{}) - if ok && builderData["queryData"] != nil { - builderQueryData, ok := builderData["queryData"].([]interface{}) - if ok { - for _, queryData := range builderQueryData { - data, ok := queryData.(map[string]interface{}) - if ok { - if data["dataSource"] == "traces" { - tracesPanelCount++ - } else if data["dataSource"] == "metrics" { - metricsPanelCount++ - } else if data["dataSource"] == "logs" { - logsPanelCount++ - } - } - } - } - } - } - } - } - } - } - } - return model.DashboardsInfo{ - LogsBasedPanels: logsPanelCount, - TracesBasedPanels: tracesPanelCount, - MetricBasedPanels: metricsPanelCount, - } -} - -func (r *ClickHouseReader) GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error) { - savedViewsInfo := model.SavedViewsInfo{} - savedViews, err := explorer.GetViews() - if err != nil { - zap.S().Debug("Error in fetching saved views info: ", err) - return &savedViewsInfo, err - } - savedViewsInfo.TotalSavedViews = len(savedViews) - for _, view := range savedViews { - if view.SourcePage == "traces" { - savedViewsInfo.TracesSavedViews += 1 - } else if view.SourcePage == "logs" { - savedViewsInfo.LogsSavedViews += 1 - } - } - return &savedViewsInfo, nil -} - -func (r *ClickHouseReader) GetUsers(ctx context.Context) ([]model.UserPayload, error) { - - users, apiErr := dao.DB().GetUsers(ctx) - if apiErr != nil { - return nil, apiErr.Err - } - return users, nil -} - func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsResponse, 
*model.ApiError) { // response will contain top level fields from the otel log model response := model.GetFieldsResponse{ @@ -3518,23 +3008,23 @@ func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsRe resources = removeUnderscoreDuplicateFields(resources) statements := []model.ShowCreateTableStatement{} - query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTable) + query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTableName) err = r.db.Select(ctx, &statements, query) if err != nil { return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal} } - extractSelectedAndInterestingFields(statements[0].Statement, constants.Attributes, &attributes, &response) - extractSelectedAndInterestingFields(statements[0].Statement, constants.Resources, &resources, &response) + r.extractSelectedAndInterestingFields(statements[0].Statement, constants.Attributes, &attributes, &response) + r.extractSelectedAndInterestingFields(statements[0].Statement, constants.Resources, &resources, &response) return &response, nil } -func extractSelectedAndInterestingFields(tableStatement string, fieldType string, fields *[]model.LogField, response *model.GetFieldsResponse) { +func (r *ClickHouseReader) extractSelectedAndInterestingFields(tableStatement string, fieldType string, fields *[]model.LogField, response *model.GetFieldsResponse) { for _, field := range *fields { field.Type = fieldType // all static fields are assumed to be selected as we don't allow changing them - if isSelectedField(tableStatement, field) { + if isColumn(r.useLogsNewSchema, tableStatement, field.Type, field.Name, field.DataType) { response.Selected = append(response.Selected, field) } else { response.Interesting = append(response.Interesting, field) @@ -3542,11 +3032,70 @@ func extractSelectedAndInterestingFields(tableStatement string, fieldType string } } -func isSelectedField(tableStatement string, field model.LogField) bool { - // in case of attributes and resources, if there is a materialized column present then it is selected - // TODO: handle partial change complete eg:- index is removed but materialized column is still present - name := utils.GetClickhouseColumnName(field.Type, field.DataType, field.Name) - return strings.Contains(tableStatement, name) +func (r *ClickHouseReader) UpdateLogFieldV2(ctx context.Context, field *model.UpdateField) *model.ApiError { + if !field.Selected { + return model.ForbiddenError(errors.New("removing a selected field is not allowed, please reach out to support.")) + } + + colname := utils.GetClickhouseColumnNameV2(field.Type, field.DataType, field.Name) + + dataType := strings.ToLower(field.DataType) + if dataType == "int64" || dataType == "float64" { + dataType = "number" + } + attrColName := fmt.Sprintf("%s_%s", field.Type, dataType) + for _, table := range []string{r.logsLocalTableV2, r.logsTableV2} { + q := "ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s` %s DEFAULT %s['%s'] CODEC(ZSTD(1))" + query := fmt.Sprintf(q, + r.logsDB, table, + r.cluster, + colname, field.DataType, + attrColName, + field.Name, + ) + err := r.db.Exec(ctx, query) + if err != nil { + return &model.ApiError{Err: err, Typ: model.ErrorInternal} + } + + query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS `%s_exists` bool DEFAULT if(mapContains(%s, '%s') != 0, true, false) CODEC(ZSTD(1))", + r.logsDB, table, + r.cluster, + colname, + attrColName, + field.Name, + ) + err = r.db.Exec(ctx, query) + if err != nil { + return 
&model.ApiError{Err: err, Typ: model.ErrorInternal} + } + } + + // create the index + if strings.ToLower(field.DataType) == "bool" { + // there is no point in creating an index for bool attributes as the cardinality is just 2 + return nil + } + + if field.IndexType == "" { + field.IndexType = constants.DefaultLogSkipIndexType + } + if field.IndexGranularity == 0 { + field.IndexGranularity = constants.DefaultLogSkipIndexGranularity + } + query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS `%s_idx` (`%s`) TYPE %s GRANULARITY %d", + r.logsDB, r.logsLocalTableV2, + r.cluster, + colname, + colname, + field.IndexType, + field.IndexGranularity, + ) + err := r.db.Exec(ctx, query) + if err != nil { + return &model.ApiError{Err: err, Typ: model.ErrorInternal} + } + return nil } func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.UpdateField) *model.ApiError { @@ -3556,10 +3105,14 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda return &model.ApiError{Err: err, Typ: model.ErrorBadData} } - colname := utils.GetClickhouseColumnName(field.Type, field.DataType, field.Name) + if r.useLogsNewSchema { + return r.UpdateLogFieldV2(ctx, field) + } // if a field is selected it means that the field needs to be indexed if field.Selected { + colname := utils.GetClickhouseColumnName(field.Type, field.DataType, field.Name) + + keyColName := fmt.Sprintf("%s_%s_key", field.Type, strings.ToLower(field.DataType)) valueColName := fmt.Sprintf("%s_%s_value", field.Type, strings.ToLower(field.DataType)) @@ -4150,10 +3703,15 @@ func (r *ClickHouseReader) GetLatestReceivedMetric( return result, nil } -func isColumn(tableStatement, attrType, field, datType string) bool { +func isColumn(useLogsNewSchema bool, tableStatement, attrType, field, datType string) bool { // value of attrType will be `resource` or `tag`, if `tag` change it to `attribute` - name := utils.GetClickhouseColumnName(attrType, datType, field) - + var name string + if useLogsNewSchema { + // adding explicit '`' around the v2 column name + name = fmt.Sprintf("`%s`", utils.GetClickhouseColumnNameV2(attrType, datType, field)) + } else { + name = utils.GetClickhouseColumnName(attrType, datType, field) + } return strings.Contains(tableStatement, fmt.Sprintf("%s ", name)) } @@ -4209,7 +3767,7 @@ func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v defer rows.Close() statements := []model.ShowCreateTableStatement{} - query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTable) + query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTableName) err = r.db.Select(ctx, &statements, query) if err != nil { return nil, fmt.Errorf("error while fetching logs schema: %s", err.Error()) @@ -4226,7 +3784,7 @@ func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v Key: tagKey, DataType: v3.AttributeKeyDataType(dataType), Type: v3.AttributeKeyType(attType), - IsColumn: isColumn(statements[0].Statement, attType, tagKey, dataType), + IsColumn: isColumn(r.useLogsNewSchema, statements[0].Statement, attType, tagKey, dataType), } response.AttributeKeys = append(response.AttributeKeys, key) } @@ -4263,7 +3821,7 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt defer rows.Close() statements := []model.ShowCreateTableStatement{} - query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTable) + query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTableName) err = r.db.Select(ctx, &statements, 
query) if err != nil { return nil, fmt.Errorf("error while fetching logs schema: %s", err.Error()) @@ -4281,7 +3839,7 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt Key: attributeKey, DataType: v3.AttributeKeyDataType(attributeDataType), Type: v3.AttributeKeyType(tagType), - IsColumn: isColumn(statements[0].Statement, tagType, attributeKey, attributeDataType), + IsColumn: isColumn(r.useLogsNewSchema, statements[0].Statement, tagType, attributeKey, attributeDataType), } response.AttributeKeys = append(response.AttributeKeys, key) @@ -4315,7 +3873,7 @@ func (r *ClickHouseReader) GetLogAttributeValues(ctx context.Context, req *v3.Fi } // ignore autocomplete request for body - if req.FilterAttributeKey == "body" { + if req.FilterAttributeKey == "body" || req.FilterAttributeKey == "__attrs" { return &v3.FilterAttributeValueResponse{}, nil } @@ -4350,10 +3908,10 @@ func (r *ClickHouseReader) GetLogAttributeValues(ctx context.Context, req *v3.Fi // prepare the query and run if len(req.SearchText) != 0 { - query = fmt.Sprintf("select distinct %s from %s.%s where timestamp >= toInt64(toUnixTimestamp(now() - INTERVAL 48 HOUR)*1000000000) and %s ILIKE $1 limit $2", selectKey, r.logsDB, r.logsTable, filterValueColumnWhere) + query = fmt.Sprintf("select distinct %s from %s.%s where timestamp >= toInt64(toUnixTimestamp(now() - INTERVAL 48 HOUR)*1000000000) and %s ILIKE $1 limit $2", selectKey, r.logsDB, r.logsLocalTableName, filterValueColumnWhere) rows, err = r.db.Query(ctx, query, searchText, req.Limit) } else { - query = fmt.Sprintf("select distinct %s from %s.%s where timestamp >= toInt64(toUnixTimestamp(now() - INTERVAL 48 HOUR)*1000000000) limit $1", selectKey, r.logsDB, r.logsTable) + query = fmt.Sprintf("select distinct %s from %s.%s where timestamp >= toInt64(toUnixTimestamp(now() - INTERVAL 48 HOUR)*1000000000) limit $1", selectKey, r.logsDB, r.logsLocalTableName) rows, err = r.db.Query(ctx, query, req.Limit) } } else if len(req.SearchText) != 0 { @@ -4540,41 +4098,65 @@ func (r *ClickHouseReader) GetQBFilterSuggestionsForLogs( func (r *ClickHouseReader) getValuesForLogAttributes( ctx context.Context, attributes []v3.AttributeKey, limit uint64, ) ([][]any, *model.ApiError) { - // query top `limit` distinct values seen for `tagKey`s of interest - // ordered by timestamp when the value was seen - query := fmt.Sprintf( - ` - select tagKey, stringTagValue, int64TagValue, float64TagValue - from ( - select - tagKey, - stringTagValue, - int64TagValue, - float64TagValue, - row_number() over (partition by tagKey order by ts desc) as rank - from ( - select - tagKey, - stringTagValue, - int64TagValue, - float64TagValue, - max(timestamp) as ts - from %s.%s - where tagKey in $1 - group by (tagKey, stringTagValue, int64TagValue, float64TagValue) - ) + /* + The query used here needs to be as cheap as possible, and while uncommon, it is possible for + a tag to have 100s of millions of values (eg: message, request_id) + + Construct a query to UNION the result of querying first `limit` values for each attribute. 
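+ Each subquery carries its own `limit`, so the combined result is bounded at most at len(attributes) * limit rows, even for very high cardinality attributes.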
For example: + ``` + select * from ( + ( + select tagKey, stringTagValue, int64TagValue, float64TagValue + from signoz_logs.distributed_tag_attributes + where tagKey = $1 and ( + stringTagValue != '' or int64TagValue is not null or float64TagValue is not null + ) + limit 2 + ) UNION DISTINCT ( + select tagKey, stringTagValue, int64TagValue, float64TagValue + from signoz_logs.distributed_tag_attributes + where tagKey = $2 and ( + stringTagValue != '' or int64TagValue is not null or float64TagValue is not null + ) + limit 2 + ) + ) settings max_threads=2 + ``` + Since tag_attributes table uses ReplacingMergeTree, the values would be distinct and no order by + is being used to ensure the `limit` clause minimizes the amount of data scanned. + + This query scanned ~30k rows per attribute on fiscalnote-v2 for attributes like `message` and `time` + that had >~110M values each + */ + + if len(attributes) > 10 { + zap.L().Error( + "log attribute values requested for too many attributes. This can lead to slow and costly queries", + zap.Int("count", len(attributes)), ) - where rank <= %d - `, - r.logsDB, r.logsTagAttributeTable, limit, - ) + attributes = attributes[:10] + } + + tagQueries := []string{} + tagKeyQueryArgs := []any{} + for idx, attrib := range attributes { + tagQueries = append(tagQueries, fmt.Sprintf(`( + select tagKey, stringTagValue, int64TagValue, float64TagValue + from %s.%s + where tagKey = $%d and ( + stringTagValue != '' or int64TagValue is not null or float64TagValue is not null + ) + limit %d + )`, r.logsDB, r.logsTagAttributeTable, idx+1, limit)) - attribNames := []string{} - for _, attrib := range attributes { - attribNames = append(attribNames, attrib.Key) + tagKeyQueryArgs = append(tagKeyQueryArgs, attrib.Key) } - rows, err := r.db.Query(ctx, query, attribNames) + query := fmt.Sprintf(`select * from ( + %s + ) settings max_threads=2`, strings.Join(tagQueries, " UNION DISTINCT ")) + + rows, err := r.db.Query(ctx, query, tagKeyQueryArgs...) 
if err != nil { zap.L().Error("couldn't query attrib values for suggestions", zap.Error(err)) return nil, model.InternalError(fmt.Errorf( @@ -4889,7 +4471,7 @@ func (r *ClickHouseReader) GetTimeSeriesResultV3(ctx context.Context, query stri if err != nil { zap.L().Error("error while reading time series result", zap.Error(err)) - return nil, err + return nil, errors.New(err.Error()) } defer rows.Close() @@ -4936,7 +4518,7 @@ func (r *ClickHouseReader) GetListResultV3(ctx context.Context, query string) ([ if err != nil { zap.L().Error("error while reading time series result", zap.Error(err)) - return nil, err + return nil, errors.New(err.Error()) } defer rows.Close() @@ -5262,7 +4844,60 @@ func (r *ClickHouseReader) GetSpanAttributeKeys(ctx context.Context) (map[string return response, nil } -func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, timestampStart uint64, idStart string, client *v3.LogsLiveTailClient) { +func (r *ClickHouseReader) LiveTailLogsV4(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClientV2) { + if timestampStart == 0 { + timestampStart = uint64(time.Now().UnixNano()) + } else { + timestampStart = uint64(utils.GetEpochNanoSecs(int64(timestampStart))) + } + + ticker := time.NewTicker(time.Duration(r.liveTailRefreshSeconds) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + done := true + client.Done <- &done + zap.L().Debug("closing go routine : " + client.Name) + return + case <-ticker.C: + // get only the newest 100 logs, as anything older won't make sense + var tmpQuery string + bucketStart := (timestampStart / NANOSECOND) - 1800 + + // we have to form the query differently if the resource filters are used + if strings.Contains(query, r.logsResourceTableV2) { + tmpQuery = fmt.Sprintf("seen_at_ts_bucket_start >=%d)) AND ts_bucket_start >=%d AND timestamp >=%d", bucketStart, bucketStart, timestampStart) + } else { + tmpQuery = fmt.Sprintf("ts_bucket_start >=%d AND timestamp >=%d", bucketStart, timestampStart) + } + if idStart != "" { + tmpQuery = fmt.Sprintf("%s AND id > '%s'", tmpQuery, idStart) + } + + // the reason we are doing desc is that we need the latest logs first + tmpQuery = query + tmpQuery + " order by timestamp desc, id desc limit 100" + + // using the old structure since we can directly read it into the struct and use it. 
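+ // rows come back newest-first; the loop below forwards them to the client oldest-first
+ // and advances the (timestampStart, idStart) cursor to the newest row, so the next tick
+ // only fetches logs that arrived after it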
+ response := []model.SignozLogV2{} + err := r.db.Select(ctx, &response, tmpQuery) + if err != nil { + zap.L().Error("Error while getting logs", zap.Error(err)) + client.Error <- err + return + } + for i := len(response) - 1; i >= 0; i-- { + client.Logs <- &response[i] + if i == 0 { + timestampStart = response[i].Timestamp + idStart = response[i].ID + } + } + } + } +} + +func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClient) { if timestampStart == 0 { timestampStart = uint64(time.Now().UnixNano()) } else { @@ -5306,7 +4941,7 @@ func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, tim } } -func (r *ClickHouseReader) AddRuleStateHistory(ctx context.Context, ruleStateHistory []v3.RuleStateHistory) error { +func (r *ClickHouseReader) AddRuleStateHistory(ctx context.Context, ruleStateHistory []model.RuleStateHistory) error { var statement driver.Batch var err error @@ -5337,11 +4972,11 @@ func (r *ClickHouseReader) AddRuleStateHistory(ctx context.Context, ruleStateHis return nil } -func (r *ClickHouseReader) GetLastSavedRuleStateHistory(ctx context.Context, ruleID string) ([]v3.RuleStateHistory, error) { +func (r *ClickHouseReader) GetLastSavedRuleStateHistory(ctx context.Context, ruleID string) ([]model.RuleStateHistory, error) { query := fmt.Sprintf("SELECT * FROM %s.%s WHERE rule_id = '%s' AND state_changed = true ORDER BY unix_milli DESC LIMIT 1 BY fingerprint", signozHistoryDBName, ruleStateHistoryTableName, ruleID) - history := []v3.RuleStateHistory{} + history := []model.RuleStateHistory{} err := r.db.Select(ctx, &history, query) if err != nil { return nil, err @@ -5350,7 +4985,7 @@ func (r *ClickHouseReader) GetLastSavedRuleStateHistory(ctx context.Context, rul } func (r *ClickHouseReader) ReadRuleStateHistoryByRuleID( - ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.RuleStateTimeline, error) { + ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*model.RuleStateTimeline, error) { var conditions []string @@ -5413,7 +5048,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryByRuleID( query := fmt.Sprintf("SELECT * FROM %s.%s WHERE %s ORDER BY unix_milli %s LIMIT %d OFFSET %d", signozHistoryDBName, ruleStateHistoryTableName, whereClause, params.Order, params.Limit, params.Offset) - history := []v3.RuleStateHistory{} + history := []model.RuleStateHistory{} zap.L().Debug("rule state history query", zap.String("query", query)) err := r.db.Select(ctx, &history, query) if err != nil { @@ -5455,7 +5090,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryByRuleID( } } - timeline := &v3.RuleStateTimeline{ + timeline := &model.RuleStateTimeline{ Items: history, Total: total, Labels: labelsMap, @@ -5465,7 +5100,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryByRuleID( } func (r *ClickHouseReader) ReadRuleStateHistoryTopContributorsByRuleID( - ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.RuleStateHistoryContributor, error) { + ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) ([]model.RuleStateHistoryContributor, error) { query := fmt.Sprintf(`SELECT fingerprint, any(labels) as labels, @@ -5478,7 +5113,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryTopContributorsByRuleID( signozHistoryDBName, ruleStateHistoryTableName, ruleID, model.StateFiring.String(), params.Start, params.End) zap.L().Debug("rule state history top contributors query", zap.String("query", query)) - 
contributors := []v3.RuleStateHistoryContributor{} + contributors := []model.RuleStateHistoryContributor{} err := r.db.Select(ctx, &contributors, query) if err != nil { zap.L().Error("Error while reading rule state history", zap.Error(err)) @@ -5488,7 +5123,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryTopContributorsByRuleID( return contributors, nil } -func (r *ClickHouseReader) GetOverallStateTransitions(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.ReleStateItem, error) { +func (r *ClickHouseReader) GetOverallStateTransitions(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) ([]model.ReleStateItem, error) { tmpl := `WITH firing_events AS ( SELECT @@ -5534,18 +5169,18 @@ ORDER BY firing_time ASC;` zap.L().Debug("overall state transitions query", zap.String("query", query)) - transitions := []v3.RuleStateTransition{} + transitions := []model.RuleStateTransition{} err := r.db.Select(ctx, &transitions, query) if err != nil { return nil, err } - stateItems := []v3.ReleStateItem{} + stateItems := []model.ReleStateItem{} for idx, item := range transitions { start := item.FiringTime end := item.ResolutionTime - stateItems = append(stateItems, v3.ReleStateItem{ + stateItems = append(stateItems, model.ReleStateItem{ State: item.State, Start: start, End: end, @@ -5553,7 +5188,7 @@ ORDER BY firing_time ASC;` if idx < len(transitions)-1 { nextStart := transitions[idx+1].FiringTime if nextStart > end { - stateItems = append(stateItems, v3.ReleStateItem{ + stateItems = append(stateItems, model.ReleStateItem{ State: model.StateInactive, Start: end, End: nextStart, @@ -5575,7 +5210,7 @@ ORDER BY firing_time ASC;` if len(transitions) == 0 { // no transitions found, it is either firing or inactive for whole time range - stateItems = append(stateItems, v3.ReleStateItem{ + stateItems = append(stateItems, model.ReleStateItem{ State: state, Start: params.Start, End: params.End, @@ -5583,7 +5218,7 @@ ORDER BY firing_time ASC;` } else { // there were some transitions, we need to add the last state at the end if state == model.StateInactive { - stateItems = append(stateItems, v3.ReleStateItem{ + stateItems = append(stateItems, model.ReleStateItem{ State: model.StateInactive, Start: transitions[len(transitions)-1].ResolutionTime, End: params.End, @@ -5600,12 +5235,12 @@ ORDER BY firing_time ASC;` if err := r.db.QueryRow(ctx, firingQuery).Scan(&firingTime); err != nil { return nil, err } - stateItems = append(stateItems, v3.ReleStateItem{ + stateItems = append(stateItems, model.ReleStateItem{ State: model.StateInactive, Start: transitions[len(transitions)-1].ResolutionTime, End: firingTime, }) - stateItems = append(stateItems, v3.ReleStateItem{ + stateItems = append(stateItems, model.ReleStateItem{ State: model.StateFiring, Start: firingTime, End: params.End, @@ -5615,7 +5250,7 @@ ORDER BY firing_time ASC;` return stateItems, nil } -func (r *ClickHouseReader) GetAvgResolutionTime(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (float64, error) { +func (r *ClickHouseReader) GetAvgResolutionTime(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (float64, error) { tmpl := ` WITH firing_events AS ( @@ -5670,7 +5305,7 @@ FROM matched_events; return avgResolutionTime, nil } -func (r *ClickHouseReader) GetAvgResolutionTimeByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error) { +func (r *ClickHouseReader) GetAvgResolutionTimeByInterval(ctx context.Context, ruleID string, 
params *model.QueryRuleStateHistory) (*v3.Series, error) { step := common.MinAllowedStepInterval(params.Start, params.End) @@ -5727,7 +5362,7 @@ ORDER BY ts ASC;` return result[0], nil } -func (r *ClickHouseReader) GetTotalTriggers(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (uint64, error) { +func (r *ClickHouseReader) GetTotalTriggers(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (uint64, error) { query := fmt.Sprintf("SELECT count(*) FROM %s.%s WHERE rule_id = '%s' AND (state_changed = true) AND (state = '%s') AND unix_milli >= %d AND unix_milli <= %d", signozHistoryDBName, ruleStateHistoryTableName, ruleID, model.StateFiring.String(), params.Start, params.End) @@ -5741,7 +5376,7 @@ func (r *ClickHouseReader) GetTotalTriggers(ctx context.Context, ruleID string, return totalTriggers, nil } -func (r *ClickHouseReader) GetTriggersByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error) { +func (r *ClickHouseReader) GetTriggersByInterval(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*v3.Series, error) { step := common.MinAllowedStepInterval(params.Start, params.End) query := fmt.Sprintf("SELECT count(*), toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL %d SECOND) as ts FROM %s.%s WHERE rule_id = '%s' AND (state_changed = true) AND (state = '%s') AND unix_milli >= %d AND unix_milli <= %d GROUP BY ts ORDER BY ts ASC", @@ -5788,6 +5423,6 @@ func (r *ClickHouseReader) ReportQueryStartForProgressTracking( func (r *ClickHouseReader) SubscribeToQueryProgress( queryId string, -) (<-chan v3.QueryProgress, func(), *model.ApiError) { +) (<-chan model.QueryProgress, func(), *model.ApiError) { return r.queryProgressTracker.SubscribeToQueryProgress(queryId) } diff --git a/pkg/query-service/app/dashboards/model.go b/pkg/query-service/app/dashboards/model.go index a6c5d35c9e..989d266b51 100644 --- a/pkg/query-service/app/dashboards/model.go +++ b/pkg/query-service/app/dashboards/model.go @@ -5,19 +5,18 @@ import ( "encoding/base64" "encoding/json" "fmt" - "reflect" "regexp" - "strconv" "strings" "time" "github.com/google/uuid" "github.com/gosimple/slug" "github.com/jmoiron/sqlx" - "github.com/mitchellh/mapstructure" "go.signoz.io/signoz/pkg/query-service/common" "go.signoz.io/signoz/pkg/query-service/interfaces" "go.signoz.io/signoz/pkg/query-service/model" + + "go.signoz.io/signoz/pkg/query-service/telemetry" "go.uber.org/zap" ) @@ -152,6 +151,8 @@ func InitDB(dataSourceName string) (*sqlx.DB, error) { return nil, fmt.Errorf("error in adding column locked to dashboards table: %s", err.Error()) } + telemetry.GetInstance().SetDashboardsInfoCallback(GetDashboardsInfo) + return db, nil } @@ -216,14 +217,6 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf return nil, &model.ApiError{Typ: model.ErrorExec, Err: err} } - newCount, _ := countTraceAndLogsPanel(data) - if newCount > 0 { - fErr := checkFeatureUsage(fm, newCount) - if fErr != nil { - return nil, fErr - } - } - result, err := db.Exec("INSERT INTO dashboards (uuid, created_at, created_by, updated_at, updated_by, data) VALUES ($1, $2, $3, $4, $5, $6)", dash.Uuid, dash.CreatedAt, userEmail, dash.UpdatedAt, userEmail, mapData) @@ -237,11 +230,6 @@ func CreateDashboard(ctx context.Context, data map[string]interface{}, fm interf } dash.Id = int(lastInsertId) - traceAndLogsPanelUsage, _ := countTraceAndLogsPanel(data) - if traceAndLogsPanelUsage > 0 { - updateFeatureUsage(fm, 
traceAndLogsPanelUsage) - } - return dash, nil } @@ -287,11 +275,6 @@ func DeleteDashboard(ctx context.Context, uuid string, fm interfaces.FeatureLook return &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("no dashboard found with uuid: %s", uuid)} } - traceAndLogsPanelUsage, _ := countTraceAndLogsPanel(dashboard.Data) - if traceAndLogsPanelUsage > 0 { - updateFeatureUsage(fm, -traceAndLogsPanelUsage) - } - return nil } @@ -329,28 +312,15 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface } } - // check if the count of trace and logs QB panel has changed, if yes, then check feature flag count - existingCount, existingTotal := countTraceAndLogsPanel(dashboard.Data) - newCount, newTotal := countTraceAndLogsPanel(data) - if newCount > existingCount { - err := checkFeatureUsage(fm, newCount-existingCount) - if err != nil { - return nil, err - } - } + // if the total count of panels has reduced by more than 1, + // return error + existingIds := getWidgetIds(dashboard.Data) + newIds := getWidgetIds(data) - if existingTotal > newTotal && existingTotal-newTotal > 1 { - // if the total count of panels has reduced by more than 1, - // return error - existingIds := getWidgetIds(dashboard.Data) - newIds := getWidgetIds(data) - - differenceIds := getIdDifference(existingIds, newIds) - - if len(differenceIds) > 1 { - return nil, model.BadRequest(fmt.Errorf("deleting more than one panel is not supported")) - } + differenceIds := getIdDifference(existingIds, newIds) + if len(differenceIds) > 1 { + return nil, model.BadRequest(fmt.Errorf("deleting more than one panel is not supported")) } dashboard.UpdatedAt = time.Now() @@ -364,10 +334,6 @@ func UpdateDashboard(ctx context.Context, uuid string, data map[string]interface zap.L().Error("Error in inserting dashboard data", zap.Any("data", data), zap.Error(err)) return nil, &model.ApiError{Typ: model.ErrorExec, Err: err} } - if existingCount != newCount { - // if the count of trace and logs panel has changed, we need to update feature flag count as well - updateFeatureUsage(fm, newCount-existingCount) - } return dashboard, nil } @@ -389,51 +355,6 @@ func LockUnlockDashboard(ctx context.Context, uuid string, lock bool) *model.Api return nil } -func updateFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiError { - feature, err := fm.GetFeatureFlag(model.QueryBuilderPanels) - if err != nil { - switch err.(type) { - case model.ErrFeatureUnavailable: - zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err)) - return model.BadRequest(err) - default: - zap.L().Error("feature check failed", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err)) - return model.BadRequest(err) - } - } - feature.Usage += usage - if feature.Usage >= feature.UsageLimit && feature.UsageLimit != -1 { - feature.Active = false - } - if feature.Usage < feature.UsageLimit || feature.UsageLimit == -1 { - feature.Active = true - } - err = fm.UpdateFeatureFlag(feature) - if err != nil { - return model.BadRequest(err) - } - - return nil -} - -func checkFeatureUsage(fm interfaces.FeatureLookup, usage int64) *model.ApiError { - feature, err := fm.GetFeatureFlag(model.QueryBuilderPanels) - if err != nil { - switch err.(type) { - case model.ErrFeatureUnavailable: - zap.L().Error("feature unavailable", zap.String("featureKey", model.QueryBuilderPanels), zap.Error(err)) - return model.BadRequest(err) - default: - zap.L().Error("feature check failed", zap.String("featureKey", 
model.QueryBuilderPanels), zap.Error(err)) - return model.BadRequest(err) - } - } - if feature.UsageLimit-(feature.Usage+usage) < 0 && feature.UsageLimit != -1 { - return model.BadRequest(fmt.Errorf("feature usage exceeded")) - } - return nil -} - // UpdateSlug updates the slug func (d *Dashboard) UpdateSlug() { var title string @@ -469,242 +390,151 @@ func SlugifyTitle(title string) string { return s } -func widgetFromPanel(panel model.Panels, idx int, variables map[string]model.Variable) *model.Widget { - widget := model.Widget{ - Description: panel.Description, - ID: strconv.Itoa(idx), - IsStacked: false, - NullZeroValues: "zero", - Opacity: "1", - PanelTypes: "TIME_SERIES", // TODO: Need to figure out how to get this - Query: model.Query{ - ClickHouse: []model.ClickHouseQueryDashboard{ - { - Disabled: false, - Legend: "", - Name: "A", - Query: "", - }, - }, - MetricsBuilder: model.MetricsBuilder{ - Formulas: []string{}, - QueryBuilder: []model.QueryBuilder{ - { - AggregateOperator: 1, - Disabled: false, - GroupBy: []string{}, - Legend: "", - MetricName: "", - Name: "A", - ReduceTo: 1, - }, - }, - }, - PromQL: []model.PromQueryDashboard{}, - QueryType: int(model.PROM), - }, - QueryData: model.QueryDataDashboard{ - Data: model.Data{ - QueryData: []interface{}{}, - }, - }, - Title: panel.Title, - YAxisUnit: panel.FieldConfig.Defaults.Unit, - QueryType: int(model.PROM), // TODO: Supprot for multiple query types - } - for _, target := range panel.Targets { - if target.Expr != "" { - for name := range variables { - target.Expr = strings.ReplaceAll(target.Expr, "$"+name, "{{"+"."+name+"}}") - target.Expr = strings.ReplaceAll(target.Expr, "$"+"__rate_interval", "5m") - } +func getWidgetIds(data map[string]interface{}) []string { + widgetIds := []string{} + if data != nil && data["widgets"] != nil { + widgets, ok := data["widgets"] + if ok { + data, ok := widgets.([]interface{}) + if ok { + for _, widget := range data { + sData, ok := widget.(map[string]interface{}) + if ok && sData["query"] != nil && sData["id"] != nil { + id, ok := sData["id"].(string) + + if ok { + widgetIds = append(widgetIds, id) + } - // prometheus receiver in collector maps job,instance as service_name,service_instance_id - target.Expr = instanceEQRE.ReplaceAllString(target.Expr, "service_instance_id=\"{{.instance}}\"") - target.Expr = nodeEQRE.ReplaceAllString(target.Expr, "service_instance_id=\"{{.node}}\"") - target.Expr = jobEQRE.ReplaceAllString(target.Expr, "service_name=\"{{.job}}\"") - target.Expr = instanceRERE.ReplaceAllString(target.Expr, "service_instance_id=~\"{{.instance}}\"") - target.Expr = nodeRERE.ReplaceAllString(target.Expr, "service_instance_id=~\"{{.node}}\"") - target.Expr = jobRERE.ReplaceAllString(target.Expr, "service_name=~\"{{.job}}\"") - - widget.Query.PromQL = append( - widget.Query.PromQL, - model.PromQueryDashboard{ - Disabled: false, - Legend: target.LegendFormat, - Name: target.RefID, - Query: target.Expr, - }, - ) + } + } + } } } - return &widget + return widgetIds } -func TransformGrafanaJSONToSignoz(grafanaJSON model.GrafanaJSON) model.DashboardData { - var toReturn model.DashboardData - toReturn.Title = grafanaJSON.Title - toReturn.Tags = grafanaJSON.Tags - toReturn.Variables = make(map[string]model.Variable) - - for templateIdx, template := range grafanaJSON.Templating.List { - var sort, typ, textboxValue, customValue, queryValue string - if template.Sort == 1 { - sort = "ASC" - } else if template.Sort == 2 { - sort = "DESC" - } else { - sort = "DISABLED" - } +func 
getIdDifference(existingIds []string, newIds []string) []string { + // Convert newIds array to a map for faster lookups + newIdsMap := make(map[string]bool) + for _, id := range newIds { + newIdsMap[id] = true + } - if template.Type == "query" { - if template.Datasource == nil { - zap.L().Warn("Skipping panel as it has no datasource", zap.Int("templateIdx", templateIdx)) - continue - } - // Skip if the source is not prometheus - source, stringOk := template.Datasource.(string) - if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") { - zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx)) - continue - } - var result model.Datasource - var structOk bool - if reflect.TypeOf(template.Datasource).Kind() == reflect.Map { - err := mapstructure.Decode(template.Datasource, &result) - if err == nil { - structOk = true - } - } - if result.Type != "prometheus" && result.Type != "" { - zap.L().Warn("Skipping template as it is not prometheus", zap.Int("templateIdx", templateIdx)) - continue - } + // Initialize a map to keep track of elements in the difference array + differenceMap := make(map[string]bool) - if !stringOk && !structOk { - zap.L().Warn("Didn't recognize source, skipping") - continue - } - typ = "QUERY" - } else if template.Type == "custom" { - typ = "CUSTOM" - } else if template.Type == "textbox" { - typ = "TEXTBOX" - text, ok := template.Current.Text.(string) - if ok { - textboxValue = text - } - array, ok := template.Current.Text.([]string) - if ok { - textboxValue = strings.Join(array, ",") - } - } else { - continue + // Initialize the difference array + difference := []string{} + + // Iterate through existingIds + for _, id := range existingIds { + // If the id is not found in newIds, and it's not already in the difference array + if _, found := newIdsMap[id]; !found && !differenceMap[id] { + difference = append(difference, id) + differenceMap[id] = true // Mark the id as seen in the difference array } + } - var selectedValue string - text, ok := template.Current.Value.(string) - if ok { - selectedValue = text + return difference +} + +// GetDashboardsInfo returns analytics data for dashboards +func GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error) { + dashboardsInfo := model.DashboardsInfo{} + // fetch dashboards from dashboard db + query := "SELECT data FROM dashboards" + var dashboardsData []Dashboard + err := db.Select(&dashboardsData, query) + if err != nil { + zap.L().Error("Error in processing sql query", zap.Error(err)) + return &dashboardsInfo, err + } + totalDashboardsWithPanelAndName := 0 + var dashboardNames []string + count := 0 + logChQueriesCount := 0 + for _, dashboard := range dashboardsData { + if isDashboardWithPanelAndName(dashboard.Data) { + totalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName + 1 } - array, ok := template.Current.Value.([]string) - if ok { - selectedValue = strings.Join(array, ",") + dashboardName := extractDashboardName(dashboard.Data) + if dashboardName != "" { + dashboardNames = append(dashboardNames, dashboardName) } - - toReturn.Variables[template.Name] = model.Variable{ - AllSelected: false, - CustomValue: customValue, - Description: template.Label, - MultiSelect: template.Multi, - QueryValue: queryValue, - SelectedValue: selectedValue, - ShowALLOption: template.IncludeAll, - Sort: sort, - TextboxValue: textboxValue, - Type: typ, + dashboardInfo := countPanelsInDashboard(dashboard.Data) + dashboardsInfo.LogsBasedPanels += 
dashboardInfo.LogsBasedPanels + dashboardsInfo.TracesBasedPanels += dashboardInfo.TracesBasedPanels + dashboardsInfo.MetricBasedPanels += dashboardInfo.MetricBasedPanels + if isDashboardWithTSV2(dashboard.Data) { + count = count + 1 + } + if isDashboardWithLogsClickhouseQuery(dashboard.Data) { + logChQueriesCount = logChQueriesCount + 1 + } } - row := 0 - idx := 0 - for _, panel := range grafanaJSON.Panels { - if panel.Type == "row" { - if panel.Panels != nil && len(panel.Panels) > 0 { - for _, innerPanel := range panel.Panels { - if idx%3 == 0 { - row++ - } - toReturn.Layout = append( - toReturn.Layout, - model.Layout{ - X: idx % 3 * 4, - Y: row * 3, - W: 4, - H: 3, - I: strconv.Itoa(idx), - }, - ) - - toReturn.Widgets = append(toReturn.Widgets, *widgetFromPanel(innerPanel, idx, toReturn.Variables)) - idx++ - } - } - continue - } - if panel.Datasource == nil { - zap.L().Warn("Skipping panel as it has no datasource", zap.Int("idx", idx)) - continue - } - // Skip if the datasource is not prometheus - source, stringOk := panel.Datasource.(string) - if stringOk && !strings.Contains(strings.ToLower(source), "prometheus") { - zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx)) - continue + dashboardsInfo.DashboardNames = dashboardNames + dashboardsInfo.TotalDashboards = len(dashboardsData) + dashboardsInfo.TotalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName + dashboardsInfo.QueriesWithTSV2 = count + dashboardsInfo.DashboardsWithLogsChQuery = logChQueriesCount + return &dashboardsInfo, nil +} + +func isDashboardWithTSV2(data map[string]interface{}) bool { + jsonData, err := json.Marshal(data) + if err != nil { + return false + } + return strings.Contains(string(jsonData), "time_series_v2") +} + +func isDashboardWithLogsClickhouseQuery(data map[string]interface{}) bool { + jsonData, err := json.Marshal(data) + if err != nil { + return false + } + result := strings.Contains(string(jsonData), "signoz_logs.distributed_logs ") || + strings.Contains(string(jsonData), "signoz_logs.logs ") + return result +} + +func isDashboardWithPanelAndName(data map[string]interface{}) bool { + isDashboardName := false + isDashboardWithPanelAndName := false + if data != nil && data["title"] != nil && data["widgets"] != nil { + title, ok := data["title"].(string) + if ok && title != "Sample Title" { + isDashboardName = true } - var result model.Datasource - var structOk bool - if reflect.TypeOf(panel.Datasource).Kind() == reflect.Map { - err := mapstructure.Decode(panel.Datasource, &result) - if err == nil { - structOk = true + widgets, ok := data["widgets"] + if ok && isDashboardName { + data, ok := widgets.([]interface{}) + if ok && len(data) > 0 { + isDashboardWithPanelAndName = true + } } } - if result.Type != "prometheus" && result.Type != "" { - zap.L().Warn("Skipping panel as it is not prometheus", zap.Int("idx", idx)) - continue - } + return isDashboardWithPanelAndName +} - if !stringOk && !structOk { - zap.L().Warn("Didn't recognize source, skipping") - continue - } +func extractDashboardName(data map[string]interface{}) string { - // Create a panel from "gridPos" + if data != nil && data["title"] != nil { + title, ok := data["title"].(string) + if ok { + return title } - if idx%3 == 0 { - row++ } - toReturn.Layout = append( - toReturn.Layout, - model.Layout{ - X: idx % 3 * 4, - Y: row * 3, - W: 4, - H: 3, - I: strconv.Itoa(idx), - }, - ) - - toReturn.Widgets = append(toReturn.Widgets, *widgetFromPanel(panel, idx, toReturn.Variables)) - idx++ - } - return toReturn + } + 
return "" } -func countTraceAndLogsPanel(data map[string]interface{}) (int64, int64) { - count := int64(0) - totalPanels := int64(0) +func countPanelsInDashboard(data map[string]interface{}) model.DashboardsInfo { + var logsPanelCount, tracesPanelCount, metricsPanelCount int + // totalPanels := 0 if data != nil && data["widgets"] != nil { widgets, ok := data["widgets"] if ok { @@ -713,7 +543,7 @@ func countTraceAndLogsPanel(data map[string]interface{}) (int64, int64) { for _, widget := range data { sData, ok := widget.(map[string]interface{}) if ok && sData["query"] != nil { - totalPanels++ + // totalPanels++ query, ok := sData["query"].(map[string]interface{}) if ok && query["queryType"] == "builder" && query["builder"] != nil { builderData, ok := query["builder"].(map[string]interface{}) @@ -723,8 +553,12 @@ func countTraceAndLogsPanel(data map[string]interface{}) (int64, int64) { for _, queryData := range builderQueryData { data, ok := queryData.(map[string]interface{}) if ok { - if data["dataSource"] == "traces" || data["dataSource"] == "logs" { - count++ + if data["dataSource"] == "traces" { + tracesPanelCount++ + } else if data["dataSource"] == "metrics" { + metricsPanelCount++ + } else if data["dataSource"] == "logs" { + logsPanelCount++ } } } @@ -736,54 +570,9 @@ func countTraceAndLogsPanel(data map[string]interface{}) (int64, int64) { } } } - return count, totalPanels -} - -func getWidgetIds(data map[string]interface{}) []string { - widgetIds := []string{} - if data != nil && data["widgets"] != nil { - widgets, ok := data["widgets"] - if ok { - data, ok := widgets.([]interface{}) - if ok { - for _, widget := range data { - sData, ok := widget.(map[string]interface{}) - if ok && sData["query"] != nil && sData["id"] != nil { - id, ok := sData["id"].(string) - - if ok { - widgetIds = append(widgetIds, id) - } - - } - } - } - } - } - return widgetIds -} - -func getIdDifference(existingIds []string, newIds []string) []string { - // Convert newIds array to a map for faster lookups - newIdsMap := make(map[string]bool) - for _, id := range newIds { - newIdsMap[id] = true - } - - // Initialize a map to keep track of elements in the difference array - differenceMap := make(map[string]bool) - - // Initialize the difference array - difference := []string{} - - // Iterate through existingIds - for _, id := range existingIds { - // If the id is not found in newIds, and it's not already in the difference array - if _, found := newIdsMap[id]; !found && !differenceMap[id] { - difference = append(difference, id) - differenceMap[id] = true // Mark the id as seen in the difference array - } + return model.DashboardsInfo{ + LogsBasedPanels: logsPanelCount, + TracesBasedPanels: tracesPanelCount, + MetricBasedPanels: metricsPanelCount, } - - return difference } diff --git a/pkg/query-service/app/explorer/db.go b/pkg/query-service/app/explorer/db.go index e0fbee4e51..140b0b48d8 100644 --- a/pkg/query-service/app/explorer/db.go +++ b/pkg/query-service/app/explorer/db.go @@ -10,7 +10,10 @@ import ( "github.com/google/uuid" "github.com/jmoiron/sqlx" "go.signoz.io/signoz/pkg/query-service/auth" + "go.signoz.io/signoz/pkg/query-service/model" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/telemetry" + "go.uber.org/zap" ) var db *sqlx.DB @@ -57,6 +60,8 @@ func InitWithDSN(dataSourceName string) (*sqlx.DB, error) { return nil, fmt.Errorf("error in creating saved views table: %s", err.Error()) } + telemetry.GetInstance().SetSavedViewsInfoCallback(GetSavedViewsInfo) + 
return db, nil } @@ -228,3 +233,21 @@ func DeleteView(uuid_ string) error { } return nil } + +func GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error) { + savedViewsInfo := model.SavedViewsInfo{} + savedViews, err := GetViews() + if err != nil { + zap.S().Debug("Error in fetching saved views info: ", err) + return &savedViewsInfo, err + } + savedViewsInfo.TotalSavedViews = len(savedViews) + for _, view := range savedViews { + if view.SourcePage == "traces" { + savedViewsInfo.TracesSavedViews += 1 + } else if view.SourcePage == "logs" { + savedViewsInfo.LogsSavedViews += 1 + } + } + return &savedViewsInfo, nil +} diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index d347bf576e..219181dc7f 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -29,6 +29,7 @@ import ( "go.signoz.io/signoz/pkg/query-service/app/integrations" "go.signoz.io/signoz/pkg/query-service/app/logs" logsv3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3" + logsv4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4" "go.signoz.io/signoz/pkg/query-service/app/metrics" metricsv3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3" "go.signoz.io/signoz/pkg/query-service/app/preferences" @@ -40,6 +41,7 @@ import ( "go.signoz.io/signoz/pkg/query-service/cache" "go.signoz.io/signoz/pkg/query-service/common" "go.signoz.io/signoz/pkg/query-service/constants" + "go.signoz.io/signoz/pkg/query-service/contextlinks" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.signoz.io/signoz/pkg/query-service/postprocess" @@ -143,14 +145,14 @@ type APIHandlerOpts struct { // Querier Influx Interval FluxInterval time.Duration - // Use new schema + // Use Logs New schema UseLogsNewSchema bool } // NewAPIHandler returns an APIHandler func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) { - alertManager, err := am.New("") + alertManager, err := am.New() if err != nil { return nil, err } @@ -195,10 +197,15 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) { UseLogsNewSchema: opts.UseLogsNewSchema, } + logsQueryBuilder := logsv3.PrepareLogsQuery + if opts.UseLogsNewSchema { + logsQueryBuilder = logsv4.PrepareLogsQuery + } + builderOpts := queryBuilder.QueryBuilderOptions{ BuildMetricQuery: metricsv3.PrepareMetricQuery, BuildTraceQuery: tracesV3.PrepareTracesQuery, - BuildLogQuery: logsv3.PrepareLogsQuery, + BuildLogQuery: logsQueryBuilder, } aH.queryBuilder = queryBuilder.NewQueryBuilder(builderOpts, aH.featureFlags) @@ -390,11 +397,9 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) { router.HandleFunc("/api/v1/dashboards", am.ViewAccess(aH.getDashboards)).Methods(http.MethodGet) router.HandleFunc("/api/v1/dashboards", am.EditAccess(aH.createDashboards)).Methods(http.MethodPost) - router.HandleFunc("/api/v1/dashboards/grafana", am.EditAccess(aH.createDashboardsTransform)).Methods(http.MethodPost) router.HandleFunc("/api/v1/dashboards/{uuid}", am.ViewAccess(aH.getDashboard)).Methods(http.MethodGet) router.HandleFunc("/api/v1/dashboards/{uuid}", am.EditAccess(aH.updateDashboard)).Methods(http.MethodPut) router.HandleFunc("/api/v1/dashboards/{uuid}", am.EditAccess(aH.deleteDashboard)).Methods(http.MethodDelete) - router.HandleFunc("/api/v1/variables/query", am.ViewAccess(aH.queryDashboardVars)).Methods(http.MethodGet) router.HandleFunc("/api/v2/variables/query", am.ViewAccess(aH.queryDashboardVarsV2)).Methods(http.MethodPost) router.HandleFunc("/api/v1/explorer/views", 
am.ViewAccess(aH.getSavedViews)).Methods(http.MethodGet) @@ -665,7 +670,7 @@ func (aH *APIHandler) deleteDowntimeSchedule(w http.ResponseWriter, r *http.Requ func (aH *APIHandler) getRuleStats(w http.ResponseWriter, r *http.Request) { ruleID := mux.Vars(r)["id"] - params := v3.QueryRuleStateHistory{} + params := model.QueryRuleStateHistory{} err := json.NewDecoder(r.Body).Decode(&params) if err != nil { RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) @@ -731,7 +736,7 @@ func (aH *APIHandler) getRuleStats(w http.ResponseWriter, r *http.Request) { pastAvgResolutionTime = 0 } - stats := v3.Stats{ + stats := model.Stats{ TotalCurrentTriggers: totalCurrentTriggers, TotalPastTriggers: totalPastTriggers, CurrentTriggersSeries: currentTriggersSeries, @@ -747,7 +752,7 @@ func (aH *APIHandler) getRuleStats(w http.ResponseWriter, r *http.Request) { func (aH *APIHandler) getOverallStateTransitions(w http.ResponseWriter, r *http.Request) { ruleID := mux.Vars(r)["id"] - params := v3.QueryRuleStateHistory{} + params := model.QueryRuleStateHistory{} err := json.NewDecoder(r.Body).Decode(&params) if err != nil { RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) @@ -763,9 +768,51 @@ func (aH *APIHandler) getOverallStateTransitions(w http.ResponseWriter, r *http. aH.Respond(w, stateItems) } +func (aH *APIHandler) metaForLinks(ctx context.Context, rule *rules.GettableRule) ([]v3.FilterItem, []v3.AttributeKey, map[string]v3.AttributeKey) { + filterItems := []v3.FilterItem{} + groupBy := []v3.AttributeKey{} + keys := make(map[string]v3.AttributeKey) + + if rule.AlertType == rules.AlertTypeLogs { + logFields, err := aH.reader.GetLogFields(ctx) + if err == nil { + params := &v3.QueryRangeParamsV3{ + CompositeQuery: rule.RuleCondition.CompositeQuery, + } + keys = model.GetLogFieldsV3(ctx, params, logFields) + } else { + zap.L().Error("failed to get log fields, using empty keys; the link might not work as expected", zap.Error(err)) + } + } else if rule.AlertType == rules.AlertTypeTraces { + traceFields, err := aH.reader.GetSpanAttributeKeys(ctx) + if err == nil { + keys = traceFields + } else { + zap.L().Error("failed to get span attributes, using empty keys; the link might not work as expected", zap.Error(err)) + } + } + + if rule.AlertType == rules.AlertTypeLogs || rule.AlertType == rules.AlertTypeTraces { + if rule.RuleCondition.CompositeQuery != nil { + if rule.RuleCondition.QueryType() == v3.QueryTypeBuilder { + selectedQuery := rule.RuleCondition.GetSelectedQueryName() + if rule.RuleCondition.CompositeQuery.BuilderQueries[selectedQuery] != nil && + rule.RuleCondition.CompositeQuery.BuilderQueries[selectedQuery].Filters != nil { + filterItems = rule.RuleCondition.CompositeQuery.BuilderQueries[selectedQuery].Filters.Items + } + if rule.RuleCondition.CompositeQuery.BuilderQueries[selectedQuery] != nil && + rule.RuleCondition.CompositeQuery.BuilderQueries[selectedQuery].GroupBy != nil { + groupBy = rule.RuleCondition.CompositeQuery.BuilderQueries[selectedQuery].GroupBy + } + } + } + } + return filterItems, groupBy, keys +} + func (aH *APIHandler) getRuleStateHistory(w http.ResponseWriter, r *http.Request) { ruleID := mux.Vars(r)["id"] - params := v3.QueryRuleStateHistory{} + params := model.QueryRuleStateHistory{} err := json.NewDecoder(r.Body).Decode(&params) if err != nil { RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) @@ -790,24 +837,18 @@ func (aH *APIHandler) getRuleStateHistory(w http.ResponseWriter, r *http.Request if err != nil { continue } -
filterItems := []v3.FilterItem{} - if rule.AlertType == rules.AlertTypeLogs || rule.AlertType == rules.AlertTypeTraces { - if rule.RuleCondition.CompositeQuery != nil { - if rule.RuleCondition.QueryType() == v3.QueryTypeBuilder { - for _, query := range rule.RuleCondition.CompositeQuery.BuilderQueries { - if query.Filters != nil && len(query.Filters.Items) > 0 { - filterItems = append(filterItems, query.Filters.Items...) - } - } - } - } - } - newFilters := common.PrepareFilters(lbls, filterItems) - ts := time.Unix(res.Items[idx].UnixMilli/1000, 0) + filterItems, groupBy, keys := aH.metaForLinks(r.Context(), rule) + newFilters := contextlinks.PrepareFilters(lbls, filterItems, groupBy, keys) + end := time.Unix(res.Items[idx].UnixMilli/1000, 0) + // why subtract 3 minutes? the query range is calculated based on the rule's evalWindow and evalDelay: + // alerts fire with a built-in delay of about 2 minutes, so we pull the start time back a bit + // further (3 minutes in total) to make sure the link covers the whole evaluation window + start := end.Add(-time.Duration(rule.EvalWindow)).Add(-3 * time.Minute) if rule.AlertType == rules.AlertTypeLogs { - res.Items[idx].RelatedLogsLink = common.PrepareLinksToLogs(ts, newFilters) + res.Items[idx].RelatedLogsLink = contextlinks.PrepareLinksToLogs(start, end, newFilters) } else if rule.AlertType == rules.AlertTypeTraces { - res.Items[idx].RelatedTracesLink = common.PrepareLinksToTraces(ts, newFilters) + res.Items[idx].RelatedTracesLink = contextlinks.PrepareLinksToTraces(start, end, newFilters) } } } @@ -817,7 +858,7 @@ func (aH *APIHandler) getRuleStateHistory(w http.ResponseWriter, r *http.Request func (aH *APIHandler) getRuleStateHistoryTopContributors(w http.ResponseWriter, r *http.Request) { ruleID := mux.Vars(r)["id"] - params := v3.QueryRuleStateHistory{} + params := model.QueryRuleStateHistory{} err := json.NewDecoder(r.Body).Decode(&params) if err != nil { RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) @@ -838,12 +879,14 @@ func (aH *APIHandler) getRuleStateHistoryTopContributors(w http.ResponseWriter, if err != nil { continue } - ts := time.Unix(params.End/1000, 0) - filters := common.PrepareFilters(lbls, nil) + filterItems, groupBy, keys := aH.metaForLinks(r.Context(), rule) + newFilters := contextlinks.PrepareFilters(lbls, filterItems, groupBy, keys) + end := time.Unix(params.End/1000, 0) + start := time.Unix(params.Start/1000, 0) if rule.AlertType == rules.AlertTypeLogs { - res[idx].RelatedLogsLink = common.PrepareLinksToLogs(ts, filters) + res[idx].RelatedLogsLink = contextlinks.PrepareLinksToLogs(start, end, newFilters) } else if rule.AlertType == rules.AlertTypeTraces { - res[idx].RelatedTracesLink = common.PrepareLinksToTraces(ts, filters) + res[idx].RelatedTracesLink = contextlinks.PrepareLinksToTraces(start, end, newFilters) } } } @@ -935,25 +978,6 @@ func (aH *APIHandler) deleteDashboard(w http.ResponseWriter, r *http.Request) { } -func (aH *APIHandler) queryDashboardVars(w http.ResponseWriter, r *http.Request) { - - query := r.URL.Query().Get("query") - if query == "" { - RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("query is required")}, nil) - return - } - if strings.Contains(strings.ToLower(query), "alter table") { - RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("query shouldn't alter data")}, nil) - return - } - dashboardVars, err := aH.reader.QueryDashboardVars(r.Context(), query) - if err != nil { - RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) - return - } -
aH.Respond(w, dashboardVars) -} - func prepareQuery(r *http.Request) (string, error) { var postData *model.DashboardVars @@ -1066,44 +1090,6 @@ func (aH *APIHandler) getDashboard(w http.ResponseWriter, r *http.Request) { } -func (aH *APIHandler) saveAndReturn(w http.ResponseWriter, r *http.Request, signozDashboard model.DashboardData) { - toSave := make(map[string]interface{}) - toSave["title"] = signozDashboard.Title - toSave["description"] = signozDashboard.Description - toSave["tags"] = signozDashboard.Tags - toSave["layout"] = signozDashboard.Layout - toSave["widgets"] = signozDashboard.Widgets - toSave["variables"] = signozDashboard.Variables - - dashboard, apiError := dashboards.CreateDashboard(r.Context(), toSave, aH.featureFlags) - if apiError != nil { - RespondError(w, apiError, nil) - return - } - aH.Respond(w, dashboard) -} - -func (aH *APIHandler) createDashboardsTransform(w http.ResponseWriter, r *http.Request) { - - defer r.Body.Close() - b, err := io.ReadAll(r.Body) - - if err != nil { - RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading request body") - return - } - - var importData model.GrafanaJSON - - err = json.Unmarshal(b, &importData) - if err == nil { - signozDashboard := dashboards.TransformGrafanaJSONToSignoz(importData) - aH.saveAndReturn(w, r, signozDashboard) - return - } - RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, "Error while creating dashboard from grafana json") -} - func (aH *APIHandler) createDashboards(w http.ResponseWriter, r *http.Request) { var postData map[string]interface{} @@ -1218,7 +1204,7 @@ func (aH *APIHandler) editRule(w http.ResponseWriter, r *http.Request) { func (aH *APIHandler) getChannel(w http.ResponseWriter, r *http.Request) { id := mux.Vars(r)["id"] - channel, apiErrorObj := aH.reader.GetChannel(id) + channel, apiErrorObj := aH.ruleManager.RuleDB().GetChannel(id) if apiErrorObj != nil { RespondError(w, apiErrorObj, nil) return @@ -1228,7 +1214,7 @@ func (aH *APIHandler) getChannel(w http.ResponseWriter, r *http.Request) { func (aH *APIHandler) deleteChannel(w http.ResponseWriter, r *http.Request) { id := mux.Vars(r)["id"] - apiErrorObj := aH.reader.DeleteChannel(id) + apiErrorObj := aH.ruleManager.RuleDB().DeleteChannel(id) if apiErrorObj != nil { RespondError(w, apiErrorObj, nil) return @@ -1237,7 +1223,7 @@ func (aH *APIHandler) deleteChannel(w http.ResponseWriter, r *http.Request) { } func (aH *APIHandler) listChannels(w http.ResponseWriter, r *http.Request) { - channels, apiErrorObj := aH.reader.GetChannels() + channels, apiErrorObj := aH.ruleManager.RuleDB().GetChannels() if apiErrorObj != nil { RespondError(w, apiErrorObj, nil) return @@ -1290,7 +1276,7 @@ func (aH *APIHandler) editChannel(w http.ResponseWriter, r *http.Request) { return } - _, apiErrorObj := aH.reader.EditChannel(receiver, id) + _, apiErrorObj := aH.ruleManager.RuleDB().EditChannel(receiver, id) if apiErrorObj != nil { RespondError(w, apiErrorObj, nil) @@ -1318,7 +1304,7 @@ func (aH *APIHandler) createChannel(w http.ResponseWriter, r *http.Request) { return } - _, apiErrorObj := aH.reader.CreateChannel(receiver) + _, apiErrorObj := aH.ruleManager.RuleDB().CreateChannel(receiver) if apiErrorObj != nil { RespondError(w, apiErrorObj, nil) @@ -2521,7 +2507,7 @@ func (aH *APIHandler) getNetworkData( var result []*v3.Result var errQueriesByName map[string]error - result, errQueriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil) + result, errQueriesByName, err = 
aH.querierV2.QueryRange(r.Context(), queryRangeParams) if err != nil { apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} RespondError(w, apiErrObj, errQueriesByName) @@ -2556,7 +2542,7 @@ func (aH *APIHandler) getNetworkData( return } - resultFetchLatency, errQueriesByNameFetchLatency, err := aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil) + resultFetchLatency, errQueriesByNameFetchLatency, err := aH.querierV2.QueryRange(r.Context(), queryRangeParams) if err != nil { apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} RespondError(w, apiErrObj, errQueriesByNameFetchLatency) @@ -2617,7 +2603,7 @@ func (aH *APIHandler) getProducerData( var result []*v3.Result var errQuriesByName map[string]error - result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil) + result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams) if err != nil { apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} RespondError(w, apiErrObj, errQuriesByName) @@ -2658,7 +2644,7 @@ func (aH *APIHandler) getConsumerData( var result []*v3.Result var errQuriesByName map[string]error - result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams, nil) + result, errQuriesByName, err = aH.querierV2.QueryRange(r.Context(), queryRangeParams) if err != nil { apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} RespondError(w, apiErrObj, errQuriesByName) @@ -3020,7 +3006,7 @@ func (aH *APIHandler) calculateLogsConnectionStatus( }, } queryRes, _, err := aH.querier.QueryRange( - ctx, qrParams, map[string]v3.AttributeKey{}, + ctx, qrParams, ) if err != nil { return nil, model.InternalError(fmt.Errorf( @@ -3563,55 +3549,6 @@ func (aH *APIHandler) autoCompleteAttributeValues(w http.ResponseWriter, r *http aH.Respond(w, response) } -func (aH *APIHandler) getLogFieldsV3(ctx context.Context, queryRangeParams *v3.QueryRangeParamsV3) (map[string]v3.AttributeKey, error) { - data := map[string]v3.AttributeKey{} - for _, query := range queryRangeParams.CompositeQuery.BuilderQueries { - if query.DataSource == v3.DataSourceLogs { - fields, apiError := aH.reader.GetLogFields(ctx) - if apiError != nil { - return nil, apiError.Err - } - - // top level fields meta will always be present in the frontend. 
(can be support for that as enchancement) - getType := func(t string) (v3.AttributeKeyType, bool) { - if t == "attributes" { - return v3.AttributeKeyTypeTag, false - } else if t == "resources" { - return v3.AttributeKeyTypeResource, false - } - return "", true - } - - for _, selectedField := range fields.Selected { - fieldType, pass := getType(selectedField.Type) - if pass { - continue - } - data[selectedField.Name] = v3.AttributeKey{ - Key: selectedField.Name, - Type: fieldType, - DataType: v3.AttributeKeyDataType(strings.ToLower(selectedField.DataType)), - IsColumn: true, - } - } - for _, interestingField := range fields.Interesting { - fieldType, pass := getType(interestingField.Type) - if pass { - continue - } - data[interestingField.Name] = v3.AttributeKey{ - Key: interestingField.Name, - Type: fieldType, - DataType: v3.AttributeKeyDataType(strings.ToLower(interestingField.DataType)), - IsColumn: false, - } - } - break - } - } - return data, nil -} - func (aH *APIHandler) getSpanKeysV3(ctx context.Context, queryRangeParams *v3.QueryRangeParamsV3) (map[string]v3.AttributeKey, error) { data := map[string]v3.AttributeKey{} for _, query := range queryRangeParams.CompositeQuery.BuilderQueries { @@ -3653,14 +3590,14 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder { // check if any enrichment is required for logs if yes then enrich them if logsv3.EnrichmentRequired(queryRangeParams) { - // get the fields if any logs query is present - var fields map[string]v3.AttributeKey - fields, err = aH.getLogFieldsV3(ctx, queryRangeParams) + logsFields, err := aH.reader.GetLogFields(ctx) if err != nil { apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err} RespondError(w, apiErrObj, errQuriesByName) return } + // get the fields if any logs query is present + fields := model.GetLogFieldsV3(ctx, queryRangeParams, logsFields) logsv3.Enrich(queryRangeParams, fields) } @@ -3670,13 +3607,14 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que RespondError(w, apiErrObj, errQuriesByName) return } + tracesV3.Enrich(queryRangeParams, spanKeys) } // WARN: Only works for AND operator in traces query if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder { // check if traceID is used as filter (with equal/similar operator) in traces query if yes add timestamp filter to queryRange params isUsed, traceIDs := tracesV3.TraceIdFilterUsedWithEqual(queryRangeParams) - if isUsed == true && len(traceIDs) > 0 { + if isUsed && len(traceIDs) > 0 { zap.L().Debug("traceID used as filter in traces query") // query signoz_spans table with traceID to get min and max timestamp min, max, err := aH.reader.GetMinAndMaxTimestampForTraceID(ctx, traceIDs) @@ -3691,26 +3629,34 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que // Hook up query progress tracking if requested queryIdHeader := r.Header.Get("X-SIGNOZ-QUERY-ID") if len(queryIdHeader) > 0 { - ctx = context.WithValue(ctx, "queryId", queryIdHeader) - onQueryFinished, err := aH.reader.ReportQueryStartForProgressTracking(queryIdHeader) + if err != nil { zap.L().Error( "couldn't report query start for progress tracking", zap.String("queryId", queryIdHeader), zap.Error(err), ) + } else { + // Adding queryId to the context signals clickhouse queries to report progress + //lint:ignore SA1029 ignore for now + ctx = context.WithValue(ctx, "queryId", queryIdHeader) + defer func() { go onQueryFinished() 
}() } } - result, errQuriesByName, err = aH.querier.QueryRange(ctx, queryRangeParams, spanKeys) + result, errQuriesByName, err = aH.querier.QueryRange(ctx, queryRangeParams) if err != nil { - apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} - RespondError(w, apiErrObj, errQuriesByName) + queryErrors := map[string]string{} + for name, err := range errQuriesByName { + queryErrors[fmt.Sprintf("Query-%s", name)] = err.Error() + } + apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err} + RespondError(w, apiErrObj, queryErrors) return } @@ -3926,7 +3872,7 @@ func (aH *APIHandler) GetQueryProgressUpdates(w http.ResponseWriter, r *http.Req } } -func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) { +func (aH *APIHandler) liveTailLogsV2(w http.ResponseWriter, r *http.Request) { // get the param from url and add it to body stringReader := strings.NewReader(r.URL.Query().Get("q")) @@ -3946,13 +3892,99 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) { // check if any enrichment is required for logs if yes then enrich them if logsv3.EnrichmentRequired(queryRangeParams) { // get the fields if any logs query is present - var fields map[string]v3.AttributeKey - fields, err = aH.getLogFieldsV3(r.Context(), queryRangeParams) + logsFields, err := aH.reader.GetLogFields(r.Context()) if err != nil { apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err} RespondError(w, apiErrObj, nil) return } + fields := model.GetLogFieldsV3(r.Context(), queryRangeParams, logsFields) + logsv3.Enrich(queryRangeParams, fields) + } + + queryString, err = aH.queryBuilder.PrepareLiveTailQuery(queryRangeParams) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) + return + } + + default: + err = fmt.Errorf("invalid query type") + RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil) + return + } + + w.Header().Set("Connection", "keep-alive") + w.Header().Set("Content-Type", "text/event-stream") + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("Access-Control-Allow-Origin", "*") + w.WriteHeader(200) + + flusher, ok := w.(http.Flusher) + if !ok { + err := model.ApiError{Typ: model.ErrorStreamingNotSupported, Err: nil} + RespondError(w, &err, "streaming is not supported") + return + } + + // flush the headers + flusher.Flush() + + // create the client + client := &model.LogsLiveTailClientV2{Name: r.RemoteAddr, Logs: make(chan *model.SignozLogV2, 1000), Done: make(chan *bool), Error: make(chan error)} + go aH.reader.LiveTailLogsV4(r.Context(), queryString, uint64(queryRangeParams.Start), "", client) + for { + select { + case log := <-client.Logs: + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + enc.Encode(log) + fmt.Fprintf(w, "data: %v\n\n", buf.String()) + flusher.Flush() + case <-client.Done: + zap.L().Debug("done!") + return + case err := <-client.Error: + zap.L().Error("error occurred", zap.Error(err)) + fmt.Fprintf(w, "event: error\ndata: %v\n\n", err.Error()) + flusher.Flush() + return + } + } + +} + +func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) { + if aH.UseLogsNewSchema { + aH.liveTailLogsV2(w, r) + return + } + + // get the param from url and add it to body + stringReader := strings.NewReader(r.URL.Query().Get("q")) + r.Body = io.NopCloser(stringReader) + + queryRangeParams, apiErrorObj := ParseQueryRangeParams(r) + if apiErrorObj != nil { + zap.L().Error(apiErrorObj.Err.Error()) + RespondError(w, apiErrorObj, nil) + return + } + 
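+	// old-schema path: enrich the parsed params below and build the live tail query served by LiveTailLogsV3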
+ var err error + var queryString string + switch queryRangeParams.CompositeQuery.QueryType { + case v3.QueryTypeBuilder: + // check if any enrichment is required for logs if yes then enrich them + if logsv3.EnrichmentRequired(queryRangeParams) { + logsFields, err := aH.reader.GetLogFields(r.Context()) + if err != nil { + apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err} + RespondError(w, apiErrObj, nil) + return + } + // get the fields if any logs query is present + fields := model.GetLogFieldsV3(r.Context(), queryRangeParams, logsFields) logsv3.Enrich(queryRangeParams, fields) } @@ -3969,7 +4001,7 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) { } // create the client - client := &v3.LogsLiveTailClient{Name: r.RemoteAddr, Logs: make(chan *model.SignozLog, 1000), Done: make(chan *bool), Error: make(chan error)} + client := &model.LogsLiveTailClient{Name: r.RemoteAddr, Logs: make(chan *model.SignozLog, 1000), Done: make(chan *bool), Error: make(chan error)} go aH.reader.LiveTailLogsV3(r.Context(), queryString, uint64(queryRangeParams.Start), "", client) w.Header().Set("Connection", "keep-alive") @@ -4004,6 +4036,7 @@ func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) { return } } + } func (aH *APIHandler) getMetricMetadata(w http.ResponseWriter, r *http.Request) { @@ -4028,13 +4061,13 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que // check if any enrichment is required for logs if yes then enrich them if logsv3.EnrichmentRequired(queryRangeParams) { // get the fields if any logs query is present - var fields map[string]v3.AttributeKey - fields, err = aH.getLogFieldsV3(ctx, queryRangeParams) + logsFields, err := aH.reader.GetLogFields(r.Context()) if err != nil { apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err} - RespondError(w, apiErrObj, errQuriesByName) + RespondError(w, apiErrObj, nil) return } + fields := model.GetLogFieldsV3(r.Context(), queryRangeParams, logsFields) logsv3.Enrich(queryRangeParams, fields) } @@ -4044,13 +4077,14 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que RespondError(w, apiErrObj, errQuriesByName) return } + tracesV3.Enrich(queryRangeParams, spanKeys) } // WARN: Only works for AND operator in traces query if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder { // check if traceID is used as filter (with equal/similar operator) in traces query if yes add timestamp filter to queryRange params isUsed, traceIDs := tracesV3.TraceIdFilterUsedWithEqual(queryRangeParams) - if isUsed == true && len(traceIDs) > 0 { + if isUsed && len(traceIDs) > 0 { zap.L().Debug("traceID used as filter in traces query") // query signoz_spans table with traceID to get min and max timestamp min, max, err := aH.reader.GetMinAndMaxTimestampForTraceID(ctx, traceIDs) @@ -4062,11 +4096,15 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que } } - result, errQuriesByName, err = aH.querierV2.QueryRange(ctx, queryRangeParams, spanKeys) + result, errQuriesByName, err = aH.querierV2.QueryRange(ctx, queryRangeParams) if err != nil { - apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err} - RespondError(w, apiErrObj, errQuriesByName) + queryErrors := map[string]string{} + for name, err := range errQuriesByName { + queryErrors[fmt.Sprintf("Query-%s", name)] = err.Error() + } + apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err} + RespondError(w, apiErrObj, queryErrors) return } 
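Across http_handler.go, querier/helper.go, and querier/v2/helper.go this patch repeats one piece of plumbing: choose the v3 or v4 logs query builder once, based on UseLogsNewSchema, then call it through a single function value. Below is a minimal sketch of that dispatch, using only the import paths and the shared v3.LogQBOptions signature already shown in the diff; the package name and the helper pickLogsQueryBuilder are illustrative, not part of the patch.

	package example

	import (
		logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
		logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
		v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	)

	// prepareLogsQueryFunc mirrors the signature shared by logsV3.PrepareLogsQuery
	// and logsV4.PrepareLogsQuery after this patch.
	type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.LogQBOptions) (string, error)

	// pickLogsQueryBuilder returns the v4 builder when the new logs schema is
	// enabled and the v3 builder otherwise; callers hold on to the returned
	// function value instead of branching at every call site.
	func pickLogsQueryBuilder(useLogsNewSchema bool) prepareLogsQueryFunc {
		if useLogsNewSchema {
			return logsV4.PrepareLogsQuery
		}
		return logsV3.PrepareLogsQuery
	}

Keeping the choice in one function value is also what lets PrepareQueries and QueryRange drop their keys parameter: attribute enrichment now runs before the builders, as the tracesV3.Enrich calls added to the tests below show.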
diff --git a/pkg/query-service/app/logs/v3/enrich_query.go b/pkg/query-service/app/logs/v3/enrich_query.go index 8a7bc85970..b8ed0ff801 100644 --- a/pkg/query-service/app/logs/v3/enrich_query.go +++ b/pkg/query-service/app/logs/v3/enrich_query.go @@ -94,11 +94,11 @@ func Enrich(params *v3.QueryRangeParamsV3, fields map[string]v3.AttributeKey) { if query.Expression != queryName && query.DataSource != v3.DataSourceLogs { continue } - enrichLogsQuery(query, fields) + EnrichLogsQuery(query, fields) } } -func enrichLogsQuery(query *v3.BuilderQuery, fields map[string]v3.AttributeKey) error { +func EnrichLogsQuery(query *v3.BuilderQuery, fields map[string]v3.AttributeKey) error { // enrich aggregation attribute if query.AggregateAttribute.Key != "" { query.AggregateAttribute = enrichFieldWithMetadata(query.AggregateAttribute, fields) diff --git a/pkg/query-service/app/logs/v3/query_builder.go b/pkg/query-service/app/logs/v3/query_builder.go index bd64b4d0e6..05bd799712 100644 --- a/pkg/query-service/app/logs/v3/query_builder.go +++ b/pkg/query-service/app/logs/v3/query_builder.go @@ -486,12 +486,6 @@ func AddOffsetToQuery(query string, offset uint64) string { return fmt.Sprintf("%s OFFSET %d", query, offset) } -type Options struct { - GraphLimitQtype string - IsLivetailQuery bool - PreferRPM bool -} - func IsOrderByTs(orderBy []v3.OrderBy) bool { if len(orderBy) == 1 && (orderBy[0].Key == constants.TIMESTAMP || orderBy[0].ColumnName == constants.TIMESTAMP) { return true @@ -502,7 +496,7 @@ func IsOrderByTs(orderBy []v3.OrderBy) bool { // PrepareLogsQuery prepares the query for logs // start and end are in epoch millisecond // step is in seconds -func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options Options) (string, error) { +func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.LogQBOptions) (string, error) { // adjust the start and end time to the step interval // NOTE: Disabling this as it's creating confusion between charts and actual data diff --git a/pkg/query-service/app/logs/v3/query_builder_test.go b/pkg/query-service/app/logs/v3/query_builder_test.go index db57cb2549..0eb0c202e5 100644 --- a/pkg/query-service/app/logs/v3/query_builder_test.go +++ b/pkg/query-service/app/logs/v3/query_builder_test.go @@ -1201,7 +1201,7 @@ var testPrepLogsQueryData = []struct { TableName string AggregateOperator v3.AggregateOperator ExpectedQuery string - Options Options + Options v3.LogQBOptions }{ { Name: "Test TS with limit- first", @@ -1223,7 +1223,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by value DESC) LIMIT 10", - Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, + Options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, }, { Name: "Test TS with limit- first - with order by value", @@ -1246,7 +1246,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", 
ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by value ASC) LIMIT 10", - Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, + Options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, }, { Name: "Test TS with limit- first - with order by attribute", @@ -1269,7 +1269,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT `method` from (SELECT attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') group by `method` order by `method` ASC) LIMIT 10", - Options: Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, + Options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, }, { Name: "Test TS with limit- second", @@ -1291,7 +1291,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') AND (`method`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `method`,ts order by value DESC", - Options: Options{GraphLimitQtype: constants.SecondQueryGraphLimit}, + Options: v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit}, }, { Name: "Test TS with limit- second - with order by", @@ -1314,7 +1314,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string_value[indexOf(attributes_string_key, 'method')] as `method`, toFloat64(count(distinct(attributes_string_value[indexOf(attributes_string_key, 'name')]))) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND has(attributes_string_key, 'method') AND has(attributes_string_key, 'name') AND (`method`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `method`,ts order by `method` ASC", - Options: Options{GraphLimitQtype: constants.SecondQueryGraphLimit}, + Options: v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit}, }, // Live tail { @@ -1334,7 +1334,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", 
ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where attributes_string_value[indexOf(attributes_string_key, 'method')] = 'GET' AND ", - Options: Options{IsLivetailQuery: true}, + Options: v3.LogQBOptions{IsLivetailQuery: true}, }, { Name: "Live Tail Query with contains", @@ -1353,7 +1353,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where attributes_string_value[indexOf(attributes_string_key, 'method')] ILIKE '%GET%' AND ", - Options: Options{IsLivetailQuery: true}, + Options: v3.LogQBOptions{IsLivetailQuery: true}, }, { Name: "Live Tail Query W/O filter", @@ -1369,7 +1369,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body,CAST((attributes_string_key, attributes_string_value), 'Map(String, String)') as attributes_string,CAST((attributes_int64_key, attributes_int64_value), 'Map(String, Int64)') as attributes_int64,CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64,CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool,CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string from signoz_logs.distributed_logs where ", - Options: Options{IsLivetailQuery: true}, + Options: v3.LogQBOptions{IsLivetailQuery: true}, }, { Name: "Table query w/o limit", @@ -1385,7 +1385,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by value DESC", - Options: Options{}, + Options: v3.LogQBOptions{}, }, { Name: "Table query with limit", @@ -1402,7 +1402,7 @@ var testPrepLogsQueryData = []struct { }, TableName: "logs", ExpectedQuery: "SELECT now() as ts, toFloat64(count(*)) as value from signoz_logs.distributed_logs where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) order by value DESC LIMIT 10", - Options: Options{}, + Options: v3.LogQBOptions{}, }, { Name: "Ignore offset if order by is timestamp in list queries", @@ -1488,7 +1488,7 @@ var testPrepLogsQueryLimitOffsetData = []struct { TableName string AggregateOperator v3.AggregateOperator 
ExpectedQuery string - Options Options + Options v3.LogQBOptions }{ { Name: "Test limit less than pageSize - order by ts", diff --git a/pkg/query-service/app/logs/v4/query_builder.go b/pkg/query-service/app/logs/v4/query_builder.go index b96c5b9113..47fda73c2a 100644 --- a/pkg/query-service/app/logs/v4/query_builder.go +++ b/pkg/query-service/app/logs/v4/query_builder.go @@ -80,9 +80,23 @@ func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.Attri func getExistsNexistsFilter(op v3.FilterOperator, item v3.FilterItem) string { if _, ok := constants.StaticFieldsLogsV3[item.Key.Key]; ok && item.Key.Type == v3.AttributeKeyTypeUnspecified { - // no exists filter for static fields as they exists everywhere - // TODO(nitya): Think what we can do here - return "" + // https://opentelemetry.io/docs/specs/otel/logs/data-model/ + // for top level keys of the log model: trace_id, span_id, severity_number, trace_flags etc + // we don't have an exists column. + // to check if they exists/nexists + // we can use = 0 or != 0 for numbers + // we can use = '' or != '' for strings + chOp := "!=" + if op == v3.FilterOperatorNotExists { + chOp = "=" + } + key := getClickhouseKey(item.Key) + if item.Key.DataType == v3.AttributeKeyDataTypeString { + return fmt.Sprintf("%s %s ''", key, chOp) + } + // we just have two types, number and string for top level columns + + return fmt.Sprintf("%s %s 0", key, chOp) } else if item.Key.IsColumn { // get filter for materialized columns val := true @@ -451,7 +465,11 @@ func buildLogsLiveTailQuery(mq *v3.BuilderQuery) (string, error) { } // join both the filter clauses if resourceSubQuery != "" { - filterSubQuery = filterSubQuery + " AND (resource_fingerprint GLOBAL IN " + resourceSubQuery + if filterSubQuery != "" { + filterSubQuery = filterSubQuery + " AND (resource_fingerprint GLOBAL IN " + resourceSubQuery + } else { + filterSubQuery = "(resource_fingerprint GLOBAL IN " + resourceSubQuery + } } // the reader will add the timestamp and id filters diff --git a/pkg/query-service/app/logs/v4/query_builder_test.go b/pkg/query-service/app/logs/v4/query_builder_test.go index 7bc831437c..1b24a6aac6 100644 --- a/pkg/query-service/app/logs/v4/query_builder_test.go +++ b/pkg/query-service/app/logs/v4/query_builder_test.go @@ -147,7 +147,15 @@ func Test_getExistsNexistsFilter(t *testing.T) { op: v3.FilterOperatorExists, item: v3.FilterItem{Key: v3.AttributeKey{Key: "trace_id", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified}}, }, - want: "", + want: "trace_id != ''", + }, + { + name: "exists top level column- number", + args: args{ + op: v3.FilterOperatorNotExists, + item: v3.FilterItem{Key: v3.AttributeKey{Key: "severity_number", DataType: v3.AttributeKeyDataTypeArrayFloat64, Type: v3.AttributeKeyTypeUnspecified}}, + }, + want: "severity_number = 0", }, } for _, tt := range tests { diff --git a/pkg/query-service/app/querier/helper.go b/pkg/query-service/app/querier/helper.go index 7c45cc8781..a4ccfd047a 100644 --- a/pkg/query-service/app/querier/helper.go +++ b/pkg/query-service/app/querier/helper.go @@ -9,6 +9,7 @@ import ( "time" logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3" + logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4" metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3" tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" "go.signoz.io/signoz/pkg/query-service/cache/status" @@ -19,6 +20,7 @@ import ( ) func prepareLogsQuery(_ context.Context, + useLogsNewSchema bool, start, end 
int64, builderQuery *v3.BuilderQuery, @@ -27,30 +29,35 @@ func prepareLogsQuery(_ context.Context, ) (string, error) { query := "" + logsQueryBuilder := logsV3.PrepareLogsQuery + if useLogsNewSchema { + logsQueryBuilder = logsV4.PrepareLogsQuery + } + if params == nil || builderQuery == nil { return query, fmt.Errorf("params and builderQuery cannot be nil") } // for ts query with limit replace it as it is already formed if params.CompositeQuery.PanelType == v3.PanelTypeGraph && builderQuery.Limit > 0 && len(builderQuery.GroupBy) > 0 { - limitQuery, err := logsV3.PrepareLogsQuery( + limitQuery, err := logsQueryBuilder( start, end, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, - logsV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, + v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { return query, err } - placeholderQuery, err := logsV3.PrepareLogsQuery( + placeholderQuery, err := logsQueryBuilder( start, end, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, - logsV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, + v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { return query, err @@ -59,13 +66,13 @@ func prepareLogsQuery(_ context.Context, return query, err } - query, err := logsV3.PrepareLogsQuery( + query, err := logsQueryBuilder( start, end, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, - logsV3.Options{PreferRPM: preferRPM}, + v3.LogQBOptions{PreferRPM: preferRPM}, ) if err != nil { return query, err @@ -77,7 +84,6 @@ func (q *querier) runBuilderQuery( ctx context.Context, builderQuery *v3.BuilderQuery, params *v3.QueryRangeParamsV3, - keys map[string]v3.AttributeKey, cacheKeys map[string]string, ch chan channelResult, wg *sync.WaitGroup, @@ -102,7 +108,7 @@ func (q *querier) runBuilderQuery( var query string var err error if _, ok := cacheKeys[queryName]; !ok { - query, err = prepareLogsQuery(ctx, start, end, builderQuery, params, preferRPM) + query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, start, end, builderQuery, params, preferRPM) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil} return @@ -126,7 +132,7 @@ func (q *querier) runBuilderQuery( missedSeries := make([]*v3.Series, 0) cachedSeries := make([]*v3.Series, 0) for _, miss := range misses { - query, err = prepareLogsQuery(ctx, miss.start, miss.end, builderQuery, params, preferRPM) + query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.start, miss.end, builderQuery, params, preferRPM) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil} return @@ -196,7 +202,6 @@ func (q *querier) runBuilderQuery( end, params.CompositeQuery.PanelType, builderQuery, - keys, tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { @@ -208,7 +213,6 @@ func (q *querier) runBuilderQuery( end, params.CompositeQuery.PanelType, builderQuery, - keys, tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { @@ -222,7 +226,6 @@ func (q *querier) runBuilderQuery( end, params.CompositeQuery.PanelType, builderQuery, - keys, tracesV3.Options{PreferRPM: preferRPM}, ) if err != nil { @@ -333,7 +336,6 @@ func (q *querier) runBuilderExpression( ctx context.Context, builderQuery *v3.BuilderQuery, params 
*v3.QueryRangeParamsV3, - keys map[string]v3.AttributeKey, cacheKeys map[string]string, ch chan channelResult, wg *sync.WaitGroup, @@ -342,7 +344,7 @@ func (q *querier) runBuilderExpression( queryName := builderQuery.QueryName - queries, err := q.builder.PrepareQueries(params, keys) + queries, err := q.builder.PrepareQueries(params) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: "", Series: nil} return @@ -377,7 +379,7 @@ func (q *querier) runBuilderExpression( NoCache: params.NoCache, CompositeQuery: params.CompositeQuery, Variables: params.Variables, - }, keys) + }) query := missQueries[queryName] series, err := q.execClickHouseQuery(ctx, query) if err != nil { diff --git a/pkg/query-service/app/querier/querier.go b/pkg/query-service/app/querier/querier.go index 2113b3f8fc..50ef63394a 100644 --- a/pkg/query-service/app/querier/querier.go +++ b/pkg/query-service/app/querier/querier.go @@ -11,6 +11,7 @@ import ( "time" logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3" + logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4" metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3" "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" @@ -74,6 +75,11 @@ type QuerierOptions struct { } func NewQuerier(opts QuerierOptions) interfaces.Querier { + logsQueryBuilder := logsV3.PrepareLogsQuery + if opts.UseLogsNewSchema { + logsQueryBuilder = logsV4.PrepareLogsQuery + } + return &querier{ cache: opts.Cache, reader: opts.Reader, @@ -82,14 +88,15 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier { builder: queryBuilder.NewQueryBuilder(queryBuilder.QueryBuilderOptions{ BuildTraceQuery: tracesV3.PrepareTracesQuery, - BuildLogQuery: logsV3.PrepareLogsQuery, + BuildLogQuery: logsQueryBuilder, BuildMetricQuery: metricsV3.PrepareMetricQuery, }, opts.FeatureLookup), featureLookUp: opts.FeatureLookup, - testingMode: opts.TestingMode, - returnedSeries: opts.ReturnedSeries, - returnedErr: opts.ReturnedErr, + testingMode: opts.TestingMode, + returnedSeries: opts.ReturnedSeries, + returnedErr: opts.ReturnedErr, + UseLogsNewSchema: opts.UseLogsNewSchema, } } @@ -297,7 +304,7 @@ func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series { return mergedSeries } -func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) { +func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { cacheKeys := q.keyGenerator.GenerateKeys(params) @@ -310,9 +317,9 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa } wg.Add(1) if queryName == builderQuery.Expression { - go q.runBuilderQuery(ctx, builderQuery, params, keys, cacheKeys, ch, &wg) + go q.runBuilderQuery(ctx, builderQuery, params, cacheKeys, ch, &wg) } else { - go q.runBuilderExpression(ctx, builderQuery, params, keys, cacheKeys, ch, &wg) + go q.runBuilderExpression(ctx, builderQuery, params, cacheKeys, ch, &wg) } } @@ -470,7 +477,7 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang return results, errQueriesByName, err } -func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) { +func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges 
[]utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) { res := make([]*v3.Result, 0) qName := "" pageSize := uint64(0) @@ -487,7 +494,7 @@ func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangePar params.End = v.End params.CompositeQuery.BuilderQueries[qName].PageSize = pageSize - uint64(len(data)) - queries, err := q.builder.PrepareQueries(params, keys) + queries, err := q.builder.PrepareQueries(params) if err != nil { return nil, nil, err } @@ -529,7 +536,7 @@ func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangePar return res, nil, nil } -func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) { +func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { // List query has support for only one query. if q.UseLogsNewSchema && params.CompositeQuery != nil && len(params.CompositeQuery.BuilderQueries) == 1 { for _, v := range params.CompositeQuery.BuilderQueries { @@ -537,13 +544,13 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan if v.DataSource == v3.DataSourceLogs && len(v.OrderBy) == 1 && v.OrderBy[0].ColumnName == "timestamp" && v.OrderBy[0].Order == "desc" { startEndArr := utils.GetLogsListTsRanges(params.Start, params.End) if len(startEndArr) > 0 { - return q.runLogsListQuery(ctx, params, keys, startEndArr) + return q.runLogsListQuery(ctx, params, startEndArr) } } } } - queries, err := q.builder.PrepareQueries(params, keys) + queries, err := q.builder.PrepareQueries(params) if err != nil { return nil, nil, err @@ -559,7 +566,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan rowList, err := q.reader.GetListResultV3(ctx, query) if err != nil { - ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query} + ch <- channelResult{Err: err, Name: name, Query: query} return } ch <- channelResult{List: rowList, Name: name, Query: query} @@ -590,7 +597,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan return res, nil, nil } -func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) { +func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { var results []*v3.Result var err error var errQueriesByName map[string]error @@ -598,9 +605,9 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, switch params.CompositeQuery.QueryType { case v3.QueryTypeBuilder: if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace { - results, errQueriesByName, err = q.runBuilderListQueries(ctx, params, keys) + results, errQueriesByName, err = q.runBuilderListQueries(ctx, params) } else { - results, errQueriesByName, err = q.runBuilderQueries(ctx, params, keys) + results, errQueriesByName, err = q.runBuilderQueries(ctx, params) } // in builder query, the only errors we expose are the ones that exceed the resource limits // everything else is internal error as they are not actionable by the user diff --git a/pkg/query-service/app/querier/querier_test.go b/pkg/query-service/app/querier/querier_test.go index aecb7b27ba..a4814d0c0a 100644 --- a/pkg/query-service/app/querier/querier_test.go +++ 
b/pkg/query-service/app/querier/querier_test.go @@ -8,6 +8,7 @@ import ( "time" "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" + tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" "go.signoz.io/signoz/pkg/query-service/cache/inmemory" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" ) @@ -584,7 +585,8 @@ func TestQueryRange(t *testing.T) { } for i, param := range params { - _, errByName, err := q.QueryRange(context.Background(), param, nil) + tracesV3.Enrich(param, map[string]v3.AttributeKey{}) + _, errByName, err := q.QueryRange(context.Background(), param) if err != nil { t.Errorf("expected no error, got %s", err) } @@ -693,7 +695,8 @@ func TestQueryRangeValueType(t *testing.T) { } for i, param := range params { - _, errByName, err := q.QueryRange(context.Background(), param, nil) + tracesV3.Enrich(param, map[string]v3.AttributeKey{}) + _, errByName, err := q.QueryRange(context.Background(), param) if err != nil { t.Errorf("expected no error, got %s", err) } @@ -746,7 +749,8 @@ func TestQueryRangeTimeShift(t *testing.T) { expectedTimeRangeInQueryString := fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000) for i, param := range params { - _, errByName, err := q.QueryRange(context.Background(), param, nil) + tracesV3.Enrich(param, map[string]v3.AttributeKey{}) + _, errByName, err := q.QueryRange(context.Background(), param) if err != nil { t.Errorf("expected no error, got %s", err) } @@ -844,7 +848,8 @@ func TestQueryRangeTimeShiftWithCache(t *testing.T) { } for i, param := range params { - _, errByName, err := q.QueryRange(context.Background(), param, nil) + tracesV3.Enrich(param, map[string]v3.AttributeKey{}) + _, errByName, err := q.QueryRange(context.Background(), param) if err != nil { t.Errorf("expected no error, got %s", err) } @@ -944,7 +949,8 @@ func TestQueryRangeTimeShiftWithLimitAndCache(t *testing.T) { } for i, param := range params { - _, errByName, err := q.QueryRange(context.Background(), param, nil) + tracesV3.Enrich(param, map[string]v3.AttributeKey{}) + _, errByName, err := q.QueryRange(context.Background(), param) if err != nil { t.Errorf("expected no error, got %s", err) } @@ -1033,7 +1039,8 @@ func TestQueryRangeValueTypePromQL(t *testing.T) { } for i, param := range params { - _, errByName, err := q.QueryRange(context.Background(), param, nil) + tracesV3.Enrich(param, map[string]v3.AttributeKey{}) + _, errByName, err := q.QueryRange(context.Background(), param) if err != nil { t.Errorf("expected no error, got %s", err) } diff --git a/pkg/query-service/app/querier/v2/helper.go b/pkg/query-service/app/querier/v2/helper.go index de9d591f7f..f1dd33c4e6 100644 --- a/pkg/query-service/app/querier/v2/helper.go +++ b/pkg/query-service/app/querier/v2/helper.go @@ -9,6 +9,7 @@ import ( "time" logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3" + logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4" metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3" metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4" tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" @@ -19,12 +20,17 @@ import ( ) func prepareLogsQuery(_ context.Context, + useLogsNewSchema bool, start, end int64, builderQuery *v3.BuilderQuery, params *v3.QueryRangeParamsV3, preferRPM bool, ) (string, error) { + logsQueryBuilder := logsV3.PrepareLogsQuery + if useLogsNewSchema { + logsQueryBuilder = logsV4.PrepareLogsQuery + } query := "" if params == nil || 
builderQuery == nil { @@ -33,24 +39,24 @@ func prepareLogsQuery(_ context.Context, // for ts query with limit replace it as it is already formed if params.CompositeQuery.PanelType == v3.PanelTypeGraph && builderQuery.Limit > 0 && len(builderQuery.GroupBy) > 0 { - limitQuery, err := logsV3.PrepareLogsQuery( + limitQuery, err := logsQueryBuilder( start, end, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, - logsV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, + v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { return query, err } - placeholderQuery, err := logsV3.PrepareLogsQuery( + placeholderQuery, err := logsQueryBuilder( start, end, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, - logsV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, + v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { return query, err @@ -59,13 +65,13 @@ func prepareLogsQuery(_ context.Context, return query, err } - query, err := logsV3.PrepareLogsQuery( + query, err := logsQueryBuilder( start, end, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, - logsV3.Options{PreferRPM: preferRPM}, + v3.LogQBOptions{PreferRPM: preferRPM}, ) if err != nil { return query, err @@ -77,7 +83,6 @@ func (q *querier) runBuilderQuery( ctx context.Context, builderQuery *v3.BuilderQuery, params *v3.QueryRangeParamsV3, - keys map[string]v3.AttributeKey, cacheKeys map[string]string, ch chan channelResult, wg *sync.WaitGroup, @@ -104,7 +109,7 @@ func (q *querier) runBuilderQuery( var query string var err error if _, ok := cacheKeys[queryName]; !ok { - query, err = prepareLogsQuery(ctx, start, end, builderQuery, params, preferRPM) + query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, start, end, builderQuery, params, preferRPM) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil} return @@ -127,7 +132,7 @@ func (q *querier) runBuilderQuery( missedSeries := make([]*v3.Series, 0) cachedSeries := make([]*v3.Series, 0) for _, miss := range misses { - query, err = prepareLogsQuery(ctx, miss.start, miss.end, builderQuery, params, preferRPM) + query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.start, miss.end, builderQuery, params, preferRPM) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil} return @@ -195,7 +200,6 @@ func (q *querier) runBuilderQuery( end, params.CompositeQuery.PanelType, builderQuery, - keys, tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { @@ -207,7 +211,6 @@ func (q *querier) runBuilderQuery( end, params.CompositeQuery.PanelType, builderQuery, - keys, tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM}, ) if err != nil { @@ -221,7 +224,6 @@ func (q *querier) runBuilderQuery( end, params.CompositeQuery.PanelType, builderQuery, - keys, tracesV3.Options{PreferRPM: preferRPM}, ) if err != nil { diff --git a/pkg/query-service/app/querier/v2/querier.go b/pkg/query-service/app/querier/v2/querier.go index b6d92faa44..b71a8cc0cc 100644 --- a/pkg/query-service/app/querier/v2/querier.go +++ b/pkg/query-service/app/querier/v2/querier.go @@ -11,6 +11,7 @@ import ( "time" logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3" + logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4" 
metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4" "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" @@ -52,10 +53,9 @@ type querier struct { testingMode bool queriesExecuted []string // tuple of start and end time in milliseconds - timeRanges [][]int - returnedSeries []*v3.Series - returnedErr error - + timeRanges [][]int + returnedSeries []*v3.Series + returnedErr error UseLogsNewSchema bool } @@ -74,6 +74,11 @@ type QuerierOptions struct { } func NewQuerier(opts QuerierOptions) interfaces.Querier { + logsQueryBuilder := logsV3.PrepareLogsQuery + if opts.UseLogsNewSchema { + logsQueryBuilder = logsV4.PrepareLogsQuery + } + return &querier{ cache: opts.Cache, reader: opts.Reader, @@ -82,14 +87,15 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier { builder: queryBuilder.NewQueryBuilder(queryBuilder.QueryBuilderOptions{ BuildTraceQuery: tracesV3.PrepareTracesQuery, - BuildLogQuery: logsV3.PrepareLogsQuery, + BuildLogQuery: logsQueryBuilder, BuildMetricQuery: metricsV4.PrepareMetricQuery, }, opts.FeatureLookup), featureLookUp: opts.FeatureLookup, - testingMode: opts.TestingMode, - returnedSeries: opts.ReturnedSeries, - returnedErr: opts.ReturnedErr, + testingMode: opts.TestingMode, + returnedSeries: opts.ReturnedSeries, + returnedErr: opts.ReturnedErr, + UseLogsNewSchema: opts.UseLogsNewSchema, } } @@ -312,7 +318,7 @@ func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series { return mergedSeries } -func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) { +func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { cacheKeys := q.keyGenerator.GenerateKeys(params) @@ -322,7 +328,7 @@ func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangePa for queryName, builderQuery := range params.CompositeQuery.BuilderQueries { if queryName == builderQuery.Expression { wg.Add(1) - go q.runBuilderQuery(ctx, builderQuery, params, keys, cacheKeys, ch, &wg) + go q.runBuilderQuery(ctx, builderQuery, params, cacheKeys, ch, &wg) } } @@ -479,7 +485,7 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang return results, errQueriesByName, err } -func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) { +func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) { res := make([]*v3.Result, 0) qName := "" pageSize := uint64(0) @@ -496,7 +502,7 @@ func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangePar params.End = v.End params.CompositeQuery.BuilderQueries[qName].PageSize = pageSize - uint64(len(data)) - queries, err := q.builder.PrepareQueries(params, keys) + queries, err := q.builder.PrepareQueries(params) if err != nil { return nil, nil, err } @@ -538,7 +544,7 @@ func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangePar return res, nil, nil } -func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) { +func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3) 
([]*v3.Result, map[string]error, error) { // List query has support for only one query. if q.UseLogsNewSchema && params.CompositeQuery != nil && len(params.CompositeQuery.BuilderQueries) == 1 { for _, v := range params.CompositeQuery.BuilderQueries { @@ -546,13 +552,13 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan if v.DataSource == v3.DataSourceLogs && len(v.OrderBy) == 1 && v.OrderBy[0].ColumnName == "timestamp" && v.OrderBy[0].Order == "desc" { startEndArr := utils.GetLogsListTsRanges(params.Start, params.End) if len(startEndArr) > 0 { - return q.runLogsListQuery(ctx, params, keys, startEndArr) + return q.runLogsListQuery(ctx, params, startEndArr) } } } } - queries, err := q.builder.PrepareQueries(params, keys) + queries, err := q.builder.PrepareQueries(params) if err != nil { return nil, nil, err @@ -568,7 +574,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan rowList, err := q.reader.GetListResultV3(ctx, query) if err != nil { - ch <- channelResult{Err: fmt.Errorf("error in query-%s: %v", name, err), Name: name, Query: query} + ch <- channelResult{Err: err, Name: name, Query: query} return } ch <- channelResult{List: rowList, Name: name, Query: query} @@ -601,7 +607,7 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan // QueryRange is the main function that runs the queries // and returns the results -func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) { +func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { var results []*v3.Result var err error var errQueriesByName map[string]error @@ -609,9 +615,9 @@ func (q *querier) QueryRange(ctx context.Context, params *v3.QueryRangeParamsV3, switch params.CompositeQuery.QueryType { case v3.QueryTypeBuilder: if params.CompositeQuery.PanelType == v3.PanelTypeList || params.CompositeQuery.PanelType == v3.PanelTypeTrace { - results, errQueriesByName, err = q.runBuilderListQueries(ctx, params, keys) + results, errQueriesByName, err = q.runBuilderListQueries(ctx, params) } else { - results, errQueriesByName, err = q.runBuilderQueries(ctx, params, keys) + results, errQueriesByName, err = q.runBuilderQueries(ctx, params) } // in builder query, the only errors we expose are the ones that exceed the resource limits // everything else is internal error as they are not actionable by the user diff --git a/pkg/query-service/app/querier/v2/querier_test.go b/pkg/query-service/app/querier/v2/querier_test.go index 5707e9f70d..c65b6ff54a 100644 --- a/pkg/query-service/app/querier/v2/querier_test.go +++ b/pkg/query-service/app/querier/v2/querier_test.go @@ -8,6 +8,7 @@ import ( "time" "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" + tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" "go.signoz.io/signoz/pkg/query-service/cache/inmemory" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" ) @@ -593,7 +594,8 @@ func TestV2QueryRangePanelGraph(t *testing.T) { } for i, param := range params { - _, errByName, err := q.QueryRange(context.Background(), param, nil) + tracesV3.Enrich(param, map[string]v3.AttributeKey{}) + _, errByName, err := q.QueryRange(context.Background(), param) if err != nil { t.Errorf("expected no error, got %s", err) } @@ -738,7 +740,8 @@ func TestV2QueryRangeValueType(t *testing.T) { } for i, param := range params { - _, errByName, err := 
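The list fast path above only applies when the new schema is enabled and the single builder query is a logs query ordered by timestamp desc; runLogsListQuery then walks the windows returned by utils.GetLogsListTsRanges, shrinking the page size by the rows already collected. A condensed sketch of that loop (error handling and row assembly elided; v.Start is assumed symmetric with the v.End shown in the hunk):

for _, v := range tsRanges {
	params.Start, params.End = v.Start, v.End
	// ask the next window only for what is still missing from the page
	params.CompositeQuery.BuilderQueries[qName].PageSize = pageSize - uint64(len(data))
	queries, err := q.builder.PrepareQueries(params)
	if err != nil {
		return nil, nil, err
	}
	// execute queries, append rows to data, and stop once the page is full
}
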
q.QueryRange(context.Background(), param, nil) + tracesV3.Enrich(param, map[string]v3.AttributeKey{}) + _, errByName, err := q.QueryRange(context.Background(), param) if err != nil { t.Errorf("expected no error, got %s", err) } @@ -792,7 +795,8 @@ func TestV2QueryRangeTimeShift(t *testing.T) { expectedTimeRangeInQueryString := fmt.Sprintf("timestamp >= %d AND timestamp <= %d", (1675115596722-86400*1000)*1000000, ((1675115596722+120*60*1000)-86400*1000)*1000000) for i, param := range params { - _, errByName, err := q.QueryRange(context.Background(), param, nil) + tracesV3.Enrich(param, map[string]v3.AttributeKey{}) + _, errByName, err := q.QueryRange(context.Background(), param) if err != nil { t.Errorf("expected no error, got %s", err) } @@ -892,7 +896,8 @@ func TestV2QueryRangeTimeShiftWithCache(t *testing.T) { } for i, param := range params { - _, errByName, err := q.QueryRange(context.Background(), param, nil) + tracesV3.Enrich(param, map[string]v3.AttributeKey{}) + _, errByName, err := q.QueryRange(context.Background(), param) if err != nil { t.Errorf("expected no error, got %s", err) } @@ -994,7 +999,8 @@ func TestV2QueryRangeTimeShiftWithLimitAndCache(t *testing.T) { } for i, param := range params { - _, errByName, err := q.QueryRange(context.Background(), param, nil) + tracesV3.Enrich(param, map[string]v3.AttributeKey{}) + _, errByName, err := q.QueryRange(context.Background(), param) if err != nil { t.Errorf("expected no error, got %s", err) } @@ -1085,7 +1091,8 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) { } for i, param := range params { - _, errByName, err := q.QueryRange(context.Background(), param, nil) + tracesV3.Enrich(param, map[string]v3.AttributeKey{}) + _, errByName, err := q.QueryRange(context.Background(), param) if err != nil { t.Errorf("expected no error, got %s", err) } diff --git a/pkg/query-service/app/queryBuilder/query_builder.go b/pkg/query-service/app/queryBuilder/query_builder.go index 7ddd3c114d..879c2d5153 100644 --- a/pkg/query-service/app/queryBuilder/query_builder.go +++ b/pkg/query-service/app/queryBuilder/query_builder.go @@ -5,7 +5,6 @@ import ( "strings" "github.com/SigNoz/govaluate" - logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3" metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3" tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" "go.signoz.io/signoz/pkg/query-service/cache" @@ -43,8 +42,8 @@ var SupportedFunctions = []string{ var EvalFuncs = map[string]govaluate.ExpressionFunction{} -type prepareTracesQueryFunc func(start, end int64, panelType v3.PanelType, bq *v3.BuilderQuery, keys map[string]v3.AttributeKey, options tracesV3.Options) (string, error) -type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options logsV3.Options) (string, error) +type prepareTracesQueryFunc func(start, end int64, panelType v3.PanelType, bq *v3.BuilderQuery, options tracesV3.Options) (string, error) +type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options v3.LogQBOptions) (string, error) type prepareMetricQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options metricsV3.Options) (string, error) type QueryBuilder struct { @@ -162,7 +161,7 @@ func (qb *QueryBuilder) PrepareLiveTailQuery(params *v3.QueryRangeParamsV3) (str } for queryName, query := range compositeQuery.BuilderQueries { if query.Expression == queryName { - queryStr, err = 
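Both prepareLogsQuery earlier and PrepareQueries below use the same two-pass construction for graph panels that carry a limit and a group by: the first builder call emits the inner limit query, the second emits an outer query containing a %s placeholder, and fmt.Sprintf stitches them together. A condensed sketch with the option structs from this diff (start, end, and the query variables are assumed to be in scope):

limitQuery, err := logsQueryBuilder(start, end, queryType, panelType, bq,
	v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: preferRPM})
if err != nil {
	return "", err
}
placeholderQuery, err := logsQueryBuilder(start, end, queryType, panelType, bq,
	v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: preferRPM})
if err != nil {
	return "", err
}
query := fmt.Sprintf(placeholderQuery, limitQuery) // inner limit query fills the %s
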
qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{IsLivetailQuery: true}) + queryStr, err = qb.options.BuildLogQuery(params.Start, params.End, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{IsLivetailQuery: true}) if err != nil { return "", err } @@ -173,7 +172,7 @@ func (qb *QueryBuilder) PrepareLiveTailQuery(params *v3.QueryRangeParamsV3) (str return queryStr, nil } -func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...interface{}) (map[string]string, error) { +func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3) (map[string]string, error) { queries := make(map[string]string) compositeQuery := params.CompositeQuery @@ -193,19 +192,15 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in if query.Expression == queryName { switch query.DataSource { case v3.DataSourceTraces: - keys := map[string]v3.AttributeKey{} - if len(args) > 0 { - keys = args[0].(map[string]v3.AttributeKey) - } // for ts query with group by and limit form two queries if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 { limitQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType, query, - keys, tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) + tracesV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) if err != nil { return nil, err } placeholderQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType, - query, keys, tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) + query, tracesV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) if err != nil { return nil, err } @@ -213,7 +208,7 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in queries[queryName] = query } else { queryString, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType, - query, keys, tracesV3.Options{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""}) + query, tracesV3.Options{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""}) if err != nil { return nil, err } @@ -222,18 +217,18 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3, args ...in case v3.DataSourceLogs: // for ts query with limit replace it as it is already formed if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 { - limitQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) + limitQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) if err != nil { return nil, err } - placeholderQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}) + placeholderQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: 
PreferRPMFeatureEnabled}) if err != nil { return nil, err } query := fmt.Sprintf(placeholderQuery, limitQuery) queries[queryName] = query } else { - queryString, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, logsV3.Options{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""}) + queryString, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.LogQBOptions{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""}) if err != nil { return nil, err } diff --git a/pkg/query-service/app/queryBuilder/query_builder_test.go b/pkg/query-service/app/queryBuilder/query_builder_test.go index cca8e4a028..52af7af780 100644 --- a/pkg/query-service/app/queryBuilder/query_builder_test.go +++ b/pkg/query-service/app/queryBuilder/query_builder_test.go @@ -6,6 +6,7 @@ import ( "github.com/stretchr/testify/require" logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3" + logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4" metricsv3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3" "go.signoz.io/signoz/pkg/query-service/constants" "go.signoz.io/signoz/pkg/query-service/featureManager" @@ -585,6 +586,217 @@ func TestLogsQueryWithFormula(t *testing.T) { } +var testLogsWithFormulaV2 = []struct { + Name string + Query *v3.QueryRangeParamsV3 + ExpectedQuery string +}{ + { + Name: "test formula without dot in filter and group by attribute", + Query: &v3.QueryRangeParamsV3{ + Start: 1702979275000000000, + End: 1702981075000000000, + CompositeQuery: &v3.CompositeQuery{ + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeGraph, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + StepInterval: 60, + DataSource: v3.DataSourceLogs, + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual}, + }}, + AggregateOperator: v3.AggregateOperatorCount, + Expression: "A", + OrderBy: []v3.OrderBy{ + { + ColumnName: "timestamp", + Order: "desc", + }, + }, + GroupBy: []v3.AttributeKey{ + {Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, + }, + }, + "B": { + QueryName: "B", + StepInterval: 60, + DataSource: v3.DataSourceLogs, + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "key_2", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual}, + }}, + AggregateOperator: v3.AggregateOperatorCount, + Expression: "B", + OrderBy: []v3.OrderBy{ + { + ColumnName: "timestamp", + Order: "desc", + }, + }, + GroupBy: []v3.AttributeKey{ + {Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, + }, + }, + "C": { + QueryName: "C", + Expression: "A + B", + }, + }, + }, + }, + ExpectedQuery: "SELECT A.`key_1` as `key_1`, A.`ts` as `ts`, A.value + B.value as value FROM " + + "(SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_bool['key_1'] as `key_1`, toFloat64(count(*)) as value from " + + "signoz_logs.distributed_logs_v2 where (timestamp >= 1702979275000000000 AND timestamp <= 1702981075000000000) AND (ts_bucket_start >= 1702977475 AND ts_bucket_start <= 1702981075) " + + "AND attributes_bool['key_1'] = true AND mapContains(attributes_bool, 'key_1') AND mapContains(attributes_bool, 'key_1') group by `key_1`,ts order by 
value DESC) as A INNER JOIN (SELECT " + + "toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_bool['key_1'] as `key_1`, toFloat64(count(*)) as value " + + "from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979275000000000 AND timestamp <= 1702981075000000000) AND (ts_bucket_start >= 1702977475 AND ts_bucket_start <= 1702981075) " + + "AND attributes_bool['key_2'] = true AND mapContains(attributes_bool, 'key_2') AND mapContains(attributes_bool, 'key_1') group by `key_1`,ts order by value DESC) as B ON A.`key_1` = B.`key_1` AND A.`ts` = B.`ts`", + }, + { + Name: "test formula with dot in filter and group by attribute", + Query: &v3.QueryRangeParamsV3{ + Start: 1702979056000000000, + End: 1702982656000000000, + CompositeQuery: &v3.CompositeQuery{ + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeTable, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + StepInterval: 60, + DataSource: v3.DataSourceLogs, + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual}, + }}, + AggregateOperator: v3.AggregateOperatorCount, + Expression: "A", + OrderBy: []v3.OrderBy{ + { + ColumnName: "timestamp", + Order: "desc", + }, + }, + GroupBy: []v3.AttributeKey{ + {Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, + }, + }, + "B": { + QueryName: "B", + StepInterval: 60, + DataSource: v3.DataSourceLogs, + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "key1.2", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual}, + }}, + AggregateOperator: v3.AggregateOperatorCount, + Expression: "B", + OrderBy: []v3.OrderBy{ + { + ColumnName: "timestamp", + Order: "desc", + }, + }, + GroupBy: []v3.AttributeKey{ + {Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, + }, + }, + "C": { + QueryName: "C", + Expression: "A + B", + }, + }, + }, + }, + ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value + B.value as value FROM (SELECT attributes_bool['key1.1'] as `key1.1`, " + + "toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) AND (ts_bucket_start >= 1702977256 AND ts_bucket_start <= 1702982656) " + + "AND attributes_bool['key1.1'] = true AND mapContains(attributes_bool, 'key1.1') AND mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as A INNER JOIN (SELECT " + + "attributes_bool['key1.1'] as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702979056000000000 AND timestamp <= 1702982656000000000) " + + "AND (ts_bucket_start >= 1702977256 AND ts_bucket_start <= 1702982656) AND attributes_bool['key1.2'] = true AND mapContains(attributes_bool, 'key1.2') AND " + + "mapContains(attributes_bool, 'key1.1') group by `key1.1` order by value DESC) as B ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`", + }, + { + Name: "test formula with dot in filter and group by materialized attribute", + Query: &v3.QueryRangeParamsV3{ + Start: 1702980884000000000, + End: 1702984484000000000, + CompositeQuery: &v3.CompositeQuery{ + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeGraph, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + 
QueryName: "A", + StepInterval: 60, + DataSource: v3.DataSourceLogs, + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "key_2", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Value: true, Operator: v3.FilterOperatorEqual}, + }}, + AggregateOperator: v3.AggregateOperatorCount, + Expression: "A", + OrderBy: []v3.OrderBy{ + { + ColumnName: "timestamp", + Order: "desc", + }, + }, + GroupBy: []v3.AttributeKey{ + {Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag, IsColumn: true}, + }, + }, + "B": { + QueryName: "B", + StepInterval: 60, + DataSource: v3.DataSourceLogs, + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "key_1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Value: true, Operator: v3.FilterOperatorEqual}, + }}, + AggregateOperator: v3.AggregateOperatorCount, + Expression: "B", + OrderBy: []v3.OrderBy{ + { + ColumnName: "timestamp", + Order: "desc", + }, + }, + GroupBy: []v3.AttributeKey{ + {Key: "key1.1", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag, IsColumn: true}, + }, + }, + "C": { + QueryName: "C", + Expression: "A - B", + }, + }, + }, + }, + ExpectedQuery: "SELECT A.`key1.1` as `key1.1`, A.`ts` as `ts`, A.value - B.value as value FROM (SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, " + + "`attribute_bool_key1$$1` as `key1.1`, toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1702980884000000000 AND timestamp <= 1702984484000000000) AND " + + "(ts_bucket_start >= 1702979084 AND ts_bucket_start <= 1702984484) AND `attribute_bool_key_2` = true AND `attribute_bool_key1$$1_exists`=true group by `key1.1`,ts order by value DESC) as " + + "A INNER JOIN (SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, `attribute_bool_key1$$1` as `key1.1`, toFloat64(count(*)) as value from " + + "signoz_logs.distributed_logs_v2 where (timestamp >= 1702980884000000000 AND timestamp <= 1702984484000000000) AND (ts_bucket_start >= 1702979084 AND ts_bucket_start <= 1702984484) AND " + + "attributes_bool['key_1'] = true AND mapContains(attributes_bool, 'key_1') AND `attribute_bool_key1$$1_exists`=true group by `key1.1`,ts order by value DESC) as B " + + "ON A.`key1.1` = B.`key1.1` AND A.`ts` = B.`ts`", + }, +} + +func TestLogsQueryWithFormulaV2(t *testing.T) { + t.Parallel() + + qbOptions := QueryBuilderOptions{ + BuildLogQuery: logsV4.PrepareLogsQuery, + } + fm := featureManager.StartManager() + qb := NewQueryBuilder(qbOptions, fm) + + for _, test := range testLogsWithFormulaV2 { + t.Run(test.Name, func(t *testing.T) { + queries, err := qb.PrepareQueries(test.Query) + require.NoError(t, err) + require.Equal(t, test.ExpectedQuery, queries["C"]) + }) + } + +} + func TestGenerateCacheKeysMetricsBuilder(t *testing.T) { testCases := []struct { name string diff --git a/pkg/query-service/app/server.go b/pkg/query-service/app/server.go index a1fc0dd329..22d52b9884 100644 --- a/pkg/query-service/app/server.go +++ b/pkg/query-service/app/server.go @@ -710,7 +710,7 @@ func (s *Server) Stop() error { } func makeRulesManager( - promConfigPath, + _, alertManagerURL string, ruleRepoURL string, db *sqlx.DB, diff --git a/pkg/query-service/app/traces/v3/query_builder.go b/pkg/query-service/app/traces/v3/query_builder.go index 41f64548ad..c66b95ea56 100644 --- 
a/pkg/query-service/app/traces/v3/query_builder.go +++ b/pkg/query-service/app/traces/v3/query_builder.go @@ -58,8 +58,7 @@ var tracesOperatorMappingV3 = map[v3.FilterOperator]string{ v3.FilterOperatorNotExists: "NOT has(%s%s, '%s')", } -func getColumnName(key v3.AttributeKey, keys map[string]v3.AttributeKey) string { - key = enrichKeyWithMetadata(key, keys) +func getColumnName(key v3.AttributeKey) string { if key.IsColumn { return key.Key } @@ -102,13 +101,13 @@ func enrichKeyWithMetadata(key v3.AttributeKey, keys map[string]v3.AttributeKey) } // getSelectLabels returns the select labels for the query based on groupBy and aggregateOperator -func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey, keys map[string]v3.AttributeKey) string { +func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string { var selectLabels string if aggregatorOperator == v3.AggregateOperatorNoOp { selectLabels = "" } else { for _, tag := range groupBy { - filterName := getColumnName(tag, keys) + filterName := getColumnName(tag) selectLabels += fmt.Sprintf(" %s as `%s`,", filterName, tag.Key) } } @@ -127,10 +126,10 @@ func getSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.Attribu return strings.Join(selectLabels, ",") } -func getSelectColumns(sc []v3.AttributeKey, keys map[string]v3.AttributeKey) string { +func getSelectColumns(sc []v3.AttributeKey) string { var columns []string for _, tag := range sc { - columnName := getColumnName(tag, keys) + columnName := getColumnName(tag) columns = append(columns, fmt.Sprintf("%s as `%s` ", columnName, tag.Key)) } return strings.Join(columns, ",") @@ -150,20 +149,19 @@ func getZerosForEpochNano(epoch int64) int64 { return int64(math.Pow(10, float64(19-count))) } -func buildTracesFilterQuery(fs *v3.FilterSet, keys map[string]v3.AttributeKey) (string, error) { +func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) { var conditions []string if fs != nil && len(fs.Items) != 0 { for _, item := range fs.Items { val := item.Value // generate the key - columnName := getColumnName(item.Key, keys) + columnName := getColumnName(item.Key) var fmtVal string - key := enrichKeyWithMetadata(item.Key, keys) item.Operator = v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator)))) if item.Operator != v3.FilterOperatorExists && item.Operator != v3.FilterOperatorNotExists { var err error - val, err = utils.ValidateAndCastValue(val, key.DataType) + val, err = utils.ValidateAndCastValue(val, item.Key.DataType) if err != nil { return "", fmt.Errorf("invalid value for key %s: %v", item.Key.Key, err) } @@ -179,15 +177,15 @@ func buildTracesFilterQuery(fs *v3.FilterSet, keys map[string]v3.AttributeKey) ( case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex: conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal)) case v3.FilterOperatorExists, v3.FilterOperatorNotExists: - if key.IsColumn { - subQuery, err := existsSubQueryForFixedColumn(key, item.Operator) + if item.Key.IsColumn { + subQuery, err := existsSubQueryForFixedColumn(item.Key, item.Operator) if err != nil { return "", err } conditions = append(conditions, subQuery) } else { - columnType, columnDataType := getClickhouseTracesColumnDataTypeAndType(key) - conditions = append(conditions, fmt.Sprintf(operator, columnDataType, columnType, key.Key)) + columnType, columnDataType := getClickhouseTracesColumnDataTypeAndType(item.Key) + conditions = append(conditions, fmt.Sprintf(operator, columnDataType, columnType, 
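With the keys parameter removed, getColumnName and buildTracesFilterQuery above assume every v3.AttributeKey they receive is already enriched, that is, its IsColumn, Type, and DataType fields are authoritative. Callers that previously passed the metadata map must now enrich first; a minimal sketch of that contract ("http.method" is an illustrative attribute, keys is the caller's metadata map):

key := enrichKeyWithMetadata(v3.AttributeKey{Key: "http.method"}, keys)
col := getColumnName(key) // a map-access expression, or the column name when key.IsColumn
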
item.Key.Key)) } default: @@ -218,12 +216,11 @@ func existsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (st } } -func handleEmptyValuesInGroupBy(keys map[string]v3.AttributeKey, groupBy []v3.AttributeKey) (string, error) { +func handleEmptyValuesInGroupBy(groupBy []v3.AttributeKey) (string, error) { filterItems := []v3.FilterItem{} if len(groupBy) != 0 { for _, item := range groupBy { - key := enrichKeyWithMetadata(item, keys) - if !key.IsColumn { + if !item.IsColumn { filterItems = append(filterItems, v3.FilterItem{ Key: item, Operator: v3.FilterOperatorExists, @@ -236,21 +233,21 @@ func handleEmptyValuesInGroupBy(keys map[string]v3.AttributeKey, groupBy []v3.At Operator: "AND", Items: filterItems, } - return buildTracesFilterQuery(&filterSet, keys) + return buildTracesFilterQuery(&filterSet) } return "", nil } -func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName string, keys map[string]v3.AttributeKey, panelType v3.PanelType, options Options) (string, error) { +func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, _ string, panelType v3.PanelType, options Options) (string, error) { - filterSubQuery, err := buildTracesFilterQuery(mq.Filters, keys) + filterSubQuery, err := buildTracesFilterQuery(mq.Filters) if err != nil { return "", err } // timerange will be sent in epoch millisecond spanIndexTableTimeFilter := fmt.Sprintf("(timestamp >= '%d' AND timestamp <= '%d')", start*getZerosForEpochNano(start), end*getZerosForEpochNano(end)) - selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy, keys) + selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy) having := having(mq.Having) if having != "" { @@ -283,7 +280,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str queryTmpl = "SELECT " + getSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")" } - emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(keys, mq.GroupBy) + emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(mq.GroupBy) if err != nil { return "", err } @@ -293,8 +290,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str if groupBy != "" { groupBy = " group by " + groupBy } - enrichedOrderBy := enrichOrderBy(mq.OrderBy, keys) - orderBy := orderByAttributeKeyTags(panelType, enrichedOrderBy, mq.GroupBy, keys) + orderBy := orderByAttributeKeyTags(panelType, mq.OrderBy, mq.GroupBy) if orderBy != "" { orderBy = " order by " + orderBy } @@ -305,7 +301,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str aggregationKey := "" if mq.AggregateAttribute.Key != "" { - aggregationKey = getColumnName(mq.AggregateAttribute, keys) + aggregationKey = getColumnName(mq.AggregateAttribute) } switch mq.AggregateOperator { @@ -342,14 +338,13 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str return query, nil case v3.AggregateOperatorCount: if mq.AggregateAttribute.Key != "" { - key := enrichKeyWithMetadata(mq.AggregateAttribute, keys) - if key.IsColumn { - subQuery, err := existsSubQueryForFixedColumn(key, v3.FilterOperatorExists) + if mq.AggregateAttribute.IsColumn { + subQuery, err := existsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists) if err == nil { filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery) } } else { - columnType, columnDataType := getClickhouseTracesColumnDataTypeAndType(key) + columnType, columnDataType := 
getClickhouseTracesColumnDataTypeAndType(mq.AggregateAttribute) filterSubQuery = fmt.Sprintf("%s AND has(%s%s, '%s')", filterSubQuery, columnDataType, columnType, mq.AggregateAttribute.Key) } } @@ -373,7 +368,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, tableName str if len(mq.SelectColumns) == 0 { return "", fmt.Errorf("select columns cannot be empty for panelType %s", panelType) } - selectColumns := getSelectColumns(mq.SelectColumns, keys) + selectColumns := getSelectColumns(mq.SelectColumns) queryNoOpTmpl := fmt.Sprintf("SELECT timestamp as timestamp_datetime, spanID, traceID, "+"%s ", selectColumns) + "from " + constants.SIGNOZ_TRACE_DBNAME + "." + constants.SIGNOZ_SPAN_INDEX_TABLENAME + " where %s %s" + "%s" query = fmt.Sprintf(queryNoOpTmpl, spanIndexTableTimeFilter, filterSubQuery, orderBy) } else { @@ -423,7 +418,7 @@ func groupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tag // orderBy returns a string of comma separated tags for order by clause // if there are remaining items which are not present in tags they are also added // if the order is not specified, it defaults to ASC -func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}, keys map[string]v3.AttributeKey) []string { +func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}) []string { var orderBy []string for _, item := range items { @@ -433,7 +428,7 @@ func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]st orderBy = append(orderBy, fmt.Sprintf("`%s` %s", item.ColumnName, item.Order)) } else if panelType == v3.PanelTypeList { attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn} - name := getColumnName(attr, keys) + name := getColumnName(attr) if item.IsColumn { orderBy = append(orderBy, fmt.Sprintf("`%s` %s", name, item.Order)) } else { @@ -445,13 +440,13 @@ func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]st return orderBy } -func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey, keys map[string]v3.AttributeKey) string { +func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey) string { tagLookup := map[string]struct{}{} for _, v := range tags { tagLookup[v.Key] = struct{}{} } - orderByArray := orderBy(panelType, items, tagLookup, keys) + orderByArray := orderBy(panelType, items, tagLookup) if len(orderByArray) == 0 { if panelType == v3.PanelTypeList { @@ -474,7 +469,7 @@ func having(items []v3.Having) string { return strings.Join(having, " AND ") } -func reduceToQuery(query string, reduceTo v3.ReduceToOperator, aggregateOperator v3.AggregateOperator) (string, error) { +func reduceToQuery(query string, reduceTo v3.ReduceToOperator, _ v3.AggregateOperator) (string, error) { var groupBy string switch reduceTo { @@ -508,13 +503,13 @@ func addOffsetToQuery(query string, offset uint64) string { // PrepareTracesQuery returns the query string for traces // start and end are in epoch millisecond // step is in seconds -func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, keys map[string]v3.AttributeKey, options Options) (string, error) { +func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, options Options) (string, error) { // adjust the start and end time to the step interval start = start - (start % (mq.StepInterval * 1000)) end = end - (end % 
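// Worked example of the truncation above: with StepInterval = 60 (seconds)
// and a start of 1702979275000 ms, start % (60 * 1000) == 55000, so the
// aligned start is 1702979220000 ms; the end bound is trimmed the same way
// on the next line. This keeps both bounds on step boundaries, per the
// "adjust the start and end time to the step interval" comment above.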
(mq.StepInterval * 1000)) if options.GraphLimitQtype == constants.FirstQueryGraphLimit { // give me just the group by names - query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, keys, panelType, options) + query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, panelType, options) if err != nil { return "", err } @@ -522,14 +517,14 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder return query, nil } else if options.GraphLimitQtype == constants.SecondQueryGraphLimit { - query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, keys, panelType, options) + query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, panelType, options) if err != nil { return "", err } return query, nil } - query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, keys, panelType, options) + query, err := buildTracesQuery(start, end, mq.StepInterval, mq, constants.SIGNOZ_SPAN_INDEX_TABLENAME, panelType, options) if err != nil { return "", err } @@ -545,3 +540,34 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder } return query, err } + +func Enrich(params *v3.QueryRangeParamsV3, keys map[string]v3.AttributeKey) { + if params.CompositeQuery.QueryType == v3.QueryTypeBuilder { + for _, query := range params.CompositeQuery.BuilderQueries { + if query.DataSource == v3.DataSourceTraces { + EnrichTracesQuery(query, keys) + } + } + } +} + +func EnrichTracesQuery(query *v3.BuilderQuery, keys map[string]v3.AttributeKey) { + // enrich aggregate attribute + query.AggregateAttribute = enrichKeyWithMetadata(query.AggregateAttribute, keys) + // enrich filter items + if query.Filters != nil && len(query.Filters.Items) > 0 { + for idx, filter := range query.Filters.Items { + query.Filters.Items[idx].Key = enrichKeyWithMetadata(filter.Key, keys) + } + } + // enrich group by + for idx, groupBy := range query.GroupBy { + query.GroupBy[idx] = enrichKeyWithMetadata(groupBy, keys) + } + // enrich order by + query.OrderBy = enrichOrderBy(query.OrderBy, keys) + // enrich select columns + for idx, selectColumn := range query.SelectColumns { + query.SelectColumns[idx] = enrichKeyWithMetadata(selectColumn, keys) + } +} diff --git a/pkg/query-service/app/traces/v3/query_builder_test.go b/pkg/query-service/app/traces/v3/query_builder_test.go index cf4eb21cfe..a1e7635a77 100644 --- a/pkg/query-service/app/traces/v3/query_builder_test.go +++ b/pkg/query-service/app/traces/v3/query_builder_test.go @@ -133,7 +133,7 @@ var buildFilterQueryData = []struct { func TestBuildTracesFilterQuery(t *testing.T) { for _, tt := range buildFilterQueryData { Convey("TestBuildTracesFilterQuery", t, func() { - query, err := buildTracesFilterQuery(tt.FilterSet, map[string]v3.AttributeKey{}) + query, err := buildTracesFilterQuery(tt.FilterSet) So(err, ShouldBeNil) So(query, ShouldEqual, tt.ExpectedFilter) }) @@ -169,7 +169,7 @@ var handleEmptyValuesInGroupByData = []struct { func TestBuildTracesHandleEmptyValuesInGroupBy(t *testing.T) { for _, tt := range handleEmptyValuesInGroupByData { Convey("TestBuildTracesHandleEmptyValuesInGroupBy", t, func() { - query, err := handleEmptyValuesInGroupBy(map[string]v3.AttributeKey{}, tt.GroupBy) + query, err := handleEmptyValuesInGroupBy(tt.GroupBy) So(err, ShouldBeNil) So(query, ShouldEqual, tt.ExpectedFilter) }) @@ -220,8 +220,9 
@@ var testColumnName = []struct { func TestColumnName(t *testing.T) { for _, tt := range testColumnName { + tt.AttributeKey = enrichKeyWithMetadata(tt.AttributeKey, map[string]v3.AttributeKey{}) Convey("testColumnName", t, func() { - Column := getColumnName(tt.AttributeKey, map[string]v3.AttributeKey{}) + Column := getColumnName(tt.AttributeKey) So(Column, ShouldEqual, tt.ExpectedColumn) }) } @@ -265,7 +266,7 @@ var testGetSelectLabelsData = []struct { func TestGetSelectLabels(t *testing.T) { for _, tt := range testGetSelectLabelsData { Convey("testGetSelectLabelsData", t, func() { - selectLabels := getSelectLabels(tt.AggregateOperator, tt.GroupByTags, map[string]v3.AttributeKey{}) + selectLabels := getSelectLabels(tt.AggregateOperator, tt.GroupByTags) So(selectLabels, ShouldEqual, tt.SelectLabels) }) } @@ -304,7 +305,7 @@ var testGetSelectColumnsData = []struct { func TestGetSelectColumns(t *testing.T) { for _, tt := range testGetSelectColumnsData { Convey("testGetSelectColumnsData", t, func() { - selectColumns := getSelectColumns(tt.sc, map[string]v3.AttributeKey{}) + selectColumns := getSelectColumns(tt.sc) So(selectColumns, ShouldEqual, tt.SelectColumns) }) } @@ -464,13 +465,15 @@ var testOrderBy = []struct { } func TestOrderBy(t *testing.T) { + keys := map[string]v3.AttributeKey{ + "name": {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, + "bytes": {Key: "bytes", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, + "response_time": {Key: "response_time", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: false}, + } for _, tt := range testOrderBy { Convey("testOrderBy", t, func() { - res := orderByAttributeKeyTags(tt.PanelType, tt.Items, tt.Tags, map[string]v3.AttributeKey{ - "name": {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, - "bytes": {Key: "bytes", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, - "response_time": {Key: "response_time", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: false}, - }) + tt.Items = enrichOrderBy(tt.Items, keys) + res := orderByAttributeKeyTags(tt.PanelType, tt.Items, tt.Tags) So(res, ShouldResemble, tt.Result) }) } @@ -1171,11 +1174,24 @@ var testBuildTracesQueryData = []struct { } func TestBuildTracesQuery(t *testing.T) { + keys := map[string]v3.AttributeKey{ + "name": {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, + } + for _, tt := range testBuildTracesQueryData { + tt.BuilderQuery.DataSource = v3.DataSourceTraces + params := &v3.QueryRangeParamsV3{ + Version: "v4", + CompositeQuery: &v3.CompositeQuery{ + QueryType: v3.QueryTypeBuilder, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": tt.BuilderQuery, + }, + }, + } + Enrich(params, keys) Convey("TestBuildTracesQuery", t, func() { - query, err := buildTracesQuery(tt.Start, tt.End, tt.BuilderQuery.StepInterval, tt.BuilderQuery, tt.TableName, map[string]v3.AttributeKey{ - "name": {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, - }, tt.PanelType, tt.Options) + query, err := buildTracesQuery(tt.Start, tt.End, tt.BuilderQuery.StepInterval, tt.BuilderQuery, tt.TableName, tt.PanelType, tt.Options) So(err, ShouldBeNil) So(query, ShouldEqual, tt.ExpectedQuery) }) @@ -1400,7 +1416,7 @@ var testPrepTracesQueryData = []struct { func 
TestPrepareTracesQuery(t *testing.T) { for _, tt := range testPrepTracesQueryData { Convey("TestPrepareTracesQuery", t, func() { - query, err := PrepareTracesQuery(tt.Start, tt.End, tt.PanelType, tt.BuilderQuery, tt.Keys, tt.Options) + query, err := PrepareTracesQuery(tt.Start, tt.End, tt.PanelType, tt.BuilderQuery, tt.Options) So(err, ShouldBeNil) So(query, ShouldEqual, tt.ExpectedQuery) }) diff --git a/pkg/query-service/app/traces/v3/utils.go b/pkg/query-service/app/traces/v3/utils.go index cbd0940a16..7d4edd5223 100644 --- a/pkg/query-service/app/traces/v3/utils.go +++ b/pkg/query-service/app/traces/v3/utils.go @@ -8,6 +8,39 @@ import ( "go.uber.org/zap" ) +var TracesListViewDefaultSelectedColumns = []v3.AttributeKey{ + { + Key: "serviceName", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: true, + }, + { + Key: "name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: true, + }, + { + Key: "durationNano", + DataType: v3.AttributeKeyDataTypeArrayFloat64, + Type: v3.AttributeKeyTypeTag, + IsColumn: true, + }, + { + Key: "httpMethod", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: true, + }, + { + Key: "responseStatusCode", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: true, + }, +} + // check if traceId filter is used in traces query and return the list of traceIds func TraceIdFilterUsedWithEqual(params *v3.QueryRangeParamsV3) (bool, []string) { compositeQuery := params.CompositeQuery diff --git a/pkg/query-service/common/query_range.go b/pkg/query-service/common/query_range.go index e0c675c50a..c352c7d9f2 100644 --- a/pkg/query-service/common/query_range.go +++ b/pkg/query-service/common/query_range.go @@ -1,10 +1,7 @@ package common import ( - "encoding/json" - "fmt" "math" - "net/url" "time" "go.signoz.io/signoz/pkg/query-service/constants" @@ -73,183 +70,3 @@ func LCMList(nums []int64) int64 { } return result } - -// TODO(srikanthccv): move the custom function in threshold_rule.go to here -func PrepareLinksToTraces(ts time.Time, filterItems []v3.FilterItem) string { - - start := ts.Add(-time.Minute * 15) - end := ts.Add(time.Minute * 15) - - // Traces list view expects time in nanoseconds - tr := v3.URLShareableTimeRange{ - Start: start.UnixNano(), - End: end.UnixNano(), - PageSize: 100, - } - - options := v3.URLShareableOptions{ - MaxLines: 2, - Format: "list", - SelectColumns: constants.TracesListViewDefaultSelectedColumns, - } - - period, _ := json.Marshal(tr) - urlEncodedTimeRange := url.QueryEscape(string(period)) - - urlData := v3.URLShareableCompositeQuery{ - QueryType: string(v3.QueryTypeBuilder), - Builder: v3.URLShareableBuilderQuery{ - QueryData: []v3.BuilderQuery{ - { - DataSource: v3.DataSourceTraces, - QueryName: "A", - AggregateOperator: v3.AggregateOperatorNoOp, - AggregateAttribute: v3.AttributeKey{}, - Filters: &v3.FilterSet{ - Items: filterItems, - Operator: "AND", - }, - Expression: "A", - Disabled: false, - Having: []v3.Having{}, - StepInterval: 60, - OrderBy: []v3.OrderBy{ - { - ColumnName: "timestamp", - Order: "desc", - }, - }, - }, - }, - QueryFormulas: make([]string, 0), - }, - } - - data, _ := json.Marshal(urlData) - compositeQuery := url.QueryEscape(url.QueryEscape(string(data))) - - optionsData, _ := json.Marshal(options) - urlEncodedOptions := url.QueryEscape(string(optionsData)) - - return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, 
urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions) -} - -func PrepareLinksToLogs(ts time.Time, filterItems []v3.FilterItem) string { - start := ts.Add(-time.Minute * 15) - end := ts.Add(time.Minute * 15) - - // Logs list view expects time in milliseconds - // Logs list view expects time in milliseconds - tr := v3.URLShareableTimeRange{ - Start: start.UnixMilli(), - End: end.UnixMilli(), - PageSize: 100, - } - - options := v3.URLShareableOptions{ - MaxLines: 2, - Format: "list", - SelectColumns: []v3.AttributeKey{}, - } - - period, _ := json.Marshal(tr) - urlEncodedTimeRange := url.QueryEscape(string(period)) - - urlData := v3.URLShareableCompositeQuery{ - QueryType: string(v3.QueryTypeBuilder), - Builder: v3.URLShareableBuilderQuery{ - QueryData: []v3.BuilderQuery{ - { - DataSource: v3.DataSourceLogs, - QueryName: "A", - AggregateOperator: v3.AggregateOperatorNoOp, - AggregateAttribute: v3.AttributeKey{}, - Filters: &v3.FilterSet{ - Items: filterItems, - Operator: "AND", - }, - Expression: "A", - Disabled: false, - Having: []v3.Having{}, - StepInterval: 60, - OrderBy: []v3.OrderBy{ - { - ColumnName: "timestamp", - Order: "desc", - }, - }, - }, - }, - QueryFormulas: make([]string, 0), - }, - } - - data, _ := json.Marshal(urlData) - compositeQuery := url.QueryEscape(url.QueryEscape(string(data))) - - optionsData, _ := json.Marshal(options) - urlEncodedOptions := url.QueryEscape(string(optionsData)) - - return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions) -} - -// The following function is used to prepare the where clause for the query -// `lbls` contains the key value pairs of the labels from the result of the query -// We iterate over the where clause and replace the labels with the actual values -// There are two cases: -// 1. The label is present in the where clause -// 2. 
The label is not present in the where clause -// -// Example for case 2: -// Latency by serviceName without any filter -// In this case, for each service with latency > threshold we send a notification -// The expectation will be that clicking on the related traces for service A, will -// take us to the traces page with the filter serviceName=A -// So for all the missing labels in the where clause, we add them as key = value -// -// Example for case 1: -// Severity text IN (WARN, ERROR) -// In this case, the Severity text will appear in the `lbls` if it were part of the group -// by clause, in which case we replace it with the actual value for the notification -// i.e Severity text = WARN -// If the Severity text is not part of the group by clause, then we add it as it is -func PrepareFilters(labels map[string]string, filters []v3.FilterItem) []v3.FilterItem { - var filterItems []v3.FilterItem - - added := make(map[string]struct{}) - - for _, item := range filters { - exists := false - for key, value := range labels { - if item.Key.Key == key { - // if the label is present in the where clause, replace it with key = value - filterItems = append(filterItems, v3.FilterItem{ - Key: item.Key, - Operator: v3.FilterOperatorEqual, - Value: value, - }) - exists = true - added[key] = struct{}{} - break - } - } - - if !exists { - // if the label is not present in the where clause, add it as it is - filterItems = append(filterItems, item) - } - } - - // add the labels which are not present in the where clause - for key, value := range labels { - if _, ok := added[key]; !ok { - filterItems = append(filterItems, v3.FilterItem{ - Key: v3.AttributeKey{Key: key}, - Operator: v3.FilterOperatorEqual, - Value: value, - }) - } - } - - return filterItems -} diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index 71a1e39032..78ee31e1a1 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -401,39 +401,6 @@ const TIMESTAMP = "timestamp" const FirstQueryGraphLimit = "first_query_graph_limit" const SecondQueryGraphLimit = "second_query_graph_limit" -var TracesListViewDefaultSelectedColumns = []v3.AttributeKey{ - { - Key: "serviceName", - DataType: v3.AttributeKeyDataTypeString, - Type: v3.AttributeKeyTypeTag, - IsColumn: true, - }, - { - Key: "name", - DataType: v3.AttributeKeyDataTypeString, - Type: v3.AttributeKeyTypeTag, - IsColumn: true, - }, - { - Key: "durationNano", - DataType: v3.AttributeKeyDataTypeArrayFloat64, - Type: v3.AttributeKeyTypeTag, - IsColumn: true, - }, - { - Key: "httpMethod", - DataType: v3.AttributeKeyDataTypeString, - Type: v3.AttributeKeyTypeTag, - IsColumn: true, - }, - { - Key: "responseStatusCode", - DataType: v3.AttributeKeyDataTypeString, - Type: v3.AttributeKeyTypeTag, - IsColumn: true, - }, -} - const DefaultFilterSuggestionsAttributesLimit = 50 const MaxFilterSuggestionsAttributesLimit = 100 const DefaultFilterSuggestionsExamplesLimit = 2 diff --git a/pkg/query-service/contextlinks/links.go b/pkg/query-service/contextlinks/links.go new file mode 100644 index 0000000000..d0d8400e74 --- /dev/null +++ b/pkg/query-service/contextlinks/links.go @@ -0,0 +1,203 @@ +package contextlinks + +import ( + "encoding/json" + "fmt" + "net/url" + "time" + + tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" +) + +func PrepareLinksToTraces(start, end time.Time, filterItems []v3.FilterItem) string { + + // Traces list view expects time in 
nanoseconds + tr := v3.URLShareableTimeRange{ + Start: start.UnixNano(), + End: end.UnixNano(), + PageSize: 100, + } + + options := v3.URLShareableOptions{ + MaxLines: 2, + Format: "list", + SelectColumns: tracesV3.TracesListViewDefaultSelectedColumns, + } + + period, _ := json.Marshal(tr) + urlEncodedTimeRange := url.QueryEscape(string(period)) + + builderQuery := v3.BuilderQuery{ + DataSource: v3.DataSourceTraces, + QueryName: "A", + AggregateOperator: v3.AggregateOperatorNoOp, + AggregateAttribute: v3.AttributeKey{}, + Filters: &v3.FilterSet{ + Items: filterItems, + Operator: "AND", + }, + Expression: "A", + Disabled: false, + Having: []v3.Having{}, + StepInterval: 60, + OrderBy: []v3.OrderBy{ + { + ColumnName: "timestamp", + Order: "desc", + }, + }, + } + + urlData := v3.URLShareableCompositeQuery{ + QueryType: string(v3.QueryTypeBuilder), + Builder: v3.URLShareableBuilderQuery{ + QueryData: []v3.BuilderQuery{ + builderQuery, + }, + QueryFormulas: make([]string, 0), + }, + } + + data, _ := json.Marshal(urlData) + compositeQuery := url.QueryEscape(url.QueryEscape(string(data))) + + optionsData, _ := json.Marshal(options) + urlEncodedOptions := url.QueryEscape(string(optionsData)) + + return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions) +} + +func PrepareLinksToLogs(start, end time.Time, filterItems []v3.FilterItem) string { + + // Logs list view expects time in milliseconds + tr := v3.URLShareableTimeRange{ + Start: start.UnixMilli(), + End: end.UnixMilli(), + PageSize: 100, + } + + options := v3.URLShareableOptions{ + MaxLines: 2, + Format: "list", + SelectColumns: []v3.AttributeKey{}, + } + + period, _ := json.Marshal(tr) + urlEncodedTimeRange := url.QueryEscape(string(period)) + + builderQuery := v3.BuilderQuery{ + DataSource: v3.DataSourceLogs, + QueryName: "A", + AggregateOperator: v3.AggregateOperatorNoOp, + AggregateAttribute: v3.AttributeKey{}, + Filters: &v3.FilterSet{ + Items: filterItems, + Operator: "AND", + }, + Expression: "A", + Disabled: false, + Having: []v3.Having{}, + StepInterval: 60, + OrderBy: []v3.OrderBy{ + { + ColumnName: "timestamp", + Order: "desc", + }, + }, + } + + urlData := v3.URLShareableCompositeQuery{ + QueryType: string(v3.QueryTypeBuilder), + Builder: v3.URLShareableBuilderQuery{ + QueryData: []v3.BuilderQuery{ + builderQuery, + }, + QueryFormulas: make([]string, 0), + }, + } + + data, _ := json.Marshal(urlData) + compositeQuery := url.QueryEscape(url.QueryEscape(string(data))) + + optionsData, _ := json.Marshal(options) + urlEncodedOptions := url.QueryEscape(string(optionsData)) + + return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions) +} + +// The following function is used to prepare the where clause for the query +// `lbls` contains the key value pairs of the labels from the result of the query +// We iterate over the where clause and replace the labels with the actual values +// There are two cases: +// 1. The label is present in the where clause +// 2. 
The label is not present in the where clause +// +// Example for case 2: +// Latency by serviceName without any filter +// In this case, for each service with latency > threshold we send a notification +// The expectation will be that clicking on the related traces for service A, will +// take us to the traces page with the filter serviceName=A +// So for all the missing labels in the where clause, we add them as key = value +// +// Example for case 1: +// Severity text IN (WARN, ERROR) +// In this case, the Severity text will appear in the `lbls` if it were part of the group +// by clause, in which case we replace it with the actual value for the notification +// i.e Severity text = WARN +// If the Severity text is not part of the group by clause, then we add it as it is +func PrepareFilters(labels map[string]string, whereClauseItems []v3.FilterItem, groupByItems []v3.AttributeKey, keys map[string]v3.AttributeKey) []v3.FilterItem { + var filterItems []v3.FilterItem + + added := make(map[string]struct{}) + + for _, item := range whereClauseItems { + exists := false + for key, value := range labels { + if item.Key.Key == key { + // if the label is present in the where clause, replace it with key = value + filterItems = append(filterItems, v3.FilterItem{ + Key: item.Key, + Operator: v3.FilterOperatorEqual, + Value: value, + }) + exists = true + added[key] = struct{}{} + break + } + } + + if !exists { + // if there is no label for the filter item, add it as it is + filterItems = append(filterItems, item) + } + } + + // if there are labels which are not part of the where clause, but + // exist in the result, then they could be part of the group by clause + for key, value := range labels { + if _, ok := added[key]; !ok { + // start by taking the attribute key from the keys map, if not present, create a new one + attributeKey, ok := keys[key] + if !ok { + attributeKey = v3.AttributeKey{Key: key} + } + + // if there is a group by item with the same key, use that instead + for _, groupByItem := range groupByItems { + if groupByItem.Key == key { + attributeKey = groupByItem + break + } + } + + filterItems = append(filterItems, v3.FilterItem{ + Key: attributeKey, + Operator: v3.FilterOperatorEqual, + Value: value, + }) + } + } + + return filterItems +} diff --git a/pkg/query-service/dao/sqlite/connection.go b/pkg/query-service/dao/sqlite/connection.go index a4373d5ecd..a7e77334ef 100644 --- a/pkg/query-service/dao/sqlite/connection.go +++ b/pkg/query-service/dao/sqlite/connection.go @@ -105,6 +105,7 @@ func InitDB(dataSourceName string) (*ModelDaoSqlite, error) { telemetry.GetInstance().SetUserCountCallback(mds.GetUserCount) telemetry.GetInstance().SetUserRoleCallback(mds.GetUserRole) + telemetry.GetInstance().SetGetUsersCallback(mds.GetUsers) return mds, nil } diff --git a/pkg/query-service/integrations/alertManager/manager.go b/pkg/query-service/integrations/alertManager/manager.go index d80893010e..10db4debd7 100644 --- a/pkg/query-service/integrations/alertManager/manager.go +++ b/pkg/query-service/integrations/alertManager/manager.go @@ -24,42 +24,62 @@ type Manager interface { TestReceiver(receiver *Receiver) *model.ApiError } -func New(url string) (Manager, error) { - - if url == "" { - url = constants.GetAlertManagerApiPrefix() +func defaultOptions() []ManagerOptions { + return []ManagerOptions{ + WithURL(constants.GetAlertManagerApiPrefix()), + WithChannelApiPath(constants.AmChannelApiPath), } +} - urlParsed, err := neturl.Parse(url) - if err != nil { - return nil, err +type ManagerOptions 
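PrepareFilters above now also receives the group-by keys and the attribute metadata map, so a label that only exists in the query result can be appended with correct type information. A usage sketch (all values are illustrative):

labels := map[string]string{"service.name": "frontend"}
where := []v3.FilterItem{{
	Key:      v3.AttributeKey{Key: "severity_text"},
	Operator: v3.FilterOperatorIn,
	Value:    []interface{}{"WARN", "ERROR"},
}}
groupBy := []v3.AttributeKey{{Key: "service.name", Type: v3.AttributeKeyTypeResource, DataType: v3.AttributeKeyDataTypeString}}

items := PrepareFilters(labels, where, groupBy, nil)
// items keeps severity_text IN (WARN, ERROR) unchanged (no matching label)
// and appends service.name = "frontend", typed from the matching group-by key.
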
func(m *manager) error + +func New(opts ...ManagerOptions) (Manager, error) { + m := &manager{} + + newOpts := defaultOptions() + newOpts = append(newOpts, opts...) + + for _, opt := range newOpts { + err := opt(m) + if err != nil { + return nil, err + } } - return &manager{ - url: url, - parsedURL: urlParsed, - }, nil + return m, nil } -type manager struct { - url string - parsedURL *neturl.URL +func WithURL(url string) ManagerOptions { + return func(m *manager) error { + m.url = url + parsedURL, err := neturl.Parse(url) + if err != nil { + return err + } + m.parsedURL = parsedURL + return nil + } } -func prepareAmChannelApiURL() string { - basePath := constants.GetAlertManagerApiPrefix() - AmChannelApiPath := constants.AmChannelApiPath - - if len(AmChannelApiPath) > 0 && rune(AmChannelApiPath[0]) == rune('/') { - AmChannelApiPath = AmChannelApiPath[1:] +func WithChannelApiPath(path string) ManagerOptions { + return func(m *manager) error { + m.channelApiPath = path + return nil } +} + +type manager struct { + url string + parsedURL *neturl.URL + channelApiPath string +} - return fmt.Sprintf("%s%s", basePath, AmChannelApiPath) +func (m *manager) prepareAmChannelApiURL() string { + return fmt.Sprintf("%s%s", m.url, m.channelApiPath) } -func prepareTestApiURL() string { - basePath := constants.GetAlertManagerApiPrefix() - return fmt.Sprintf("%s%s", basePath, "v1/testReceiver") +func (m *manager) prepareTestApiURL() string { + return fmt.Sprintf("%s%s", m.url, "v1/testReceiver") } func (m *manager) URL() *neturl.URL { @@ -79,7 +99,7 @@ func (m *manager) AddRoute(receiver *Receiver) *model.ApiError { receiverString, _ := json.Marshal(receiver) - amURL := prepareAmChannelApiURL() + amURL := m.prepareAmChannelApiURL() response, err := http.Post(amURL, contentType, bytes.NewBuffer(receiverString)) if err != nil { @@ -97,7 +117,7 @@ func (m *manager) AddRoute(receiver *Receiver) *model.ApiError { func (m *manager) EditRoute(receiver *Receiver) *model.ApiError { receiverString, _ := json.Marshal(receiver) - amURL := prepareAmChannelApiURL() + amURL := m.prepareAmChannelApiURL() req, err := http.NewRequest(http.MethodPut, amURL, bytes.NewBuffer(receiverString)) if err != nil { @@ -126,7 +146,7 @@ func (m *manager) DeleteRoute(name string) *model.ApiError { values := map[string]string{"name": name} requestData, _ := json.Marshal(values) - amURL := prepareAmChannelApiURL() + amURL := m.prepareAmChannelApiURL() req, err := http.NewRequest(http.MethodDelete, amURL, bytes.NewBuffer(requestData)) if err != nil { @@ -156,7 +176,7 @@ func (m *manager) TestReceiver(receiver *Receiver) *model.ApiError { receiverBytes, _ := json.Marshal(receiver) - amTestURL := prepareTestApiURL() + amTestURL := m.prepareTestApiURL() response, err := http.Post(amTestURL, contentType, bytes.NewBuffer(receiverBytes)) if err != nil { diff --git a/pkg/query-service/integrations/alertManager/notifier.go b/pkg/query-service/integrations/alertManager/notifier.go index e29879f10a..434e2bc112 100644 --- a/pkg/query-service/integrations/alertManager/notifier.go +++ b/pkg/query-service/integrations/alertManager/notifier.go @@ -295,7 +295,7 @@ func newAlertmanagerSet(urls []string, timeout time.Duration, logger log.Logger) ams := []Manager{} for _, u := range urls { - am, err := New(u) + am, err := New(WithURL(u)) if err != nil { level.Error(s.logger).Log(fmt.Sprintf("invalid alert manager url %s: %s", u, err)) } else { diff --git a/pkg/query-service/interfaces/interface.go b/pkg/query-service/interfaces/interface.go index 
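The rewrite above moves the alert-manager client to functional options: New starts from defaultOptions() (URL from constants.GetAlertManagerApiPrefix(), channel path from constants.AmChannelApiPath) and then applies caller overrides, which is what lets newAlertmanagerSet construct per-URL managers via New(WithURL(u)). A usage sketch (the URL and path literals are placeholders, not the real defaults):

func newCustomManager() (Manager, error) {
	// the channel API URL becomes m.url + m.channelApiPath,
	// here http://alertmanager:9093/api/v1/routes
	return New(
		WithURL("http://alertmanager:9093/api/"), // overrides the default prefix
		WithChannelApiPath("v1/routes"),          // overrides constants.AmChannelApiPath
	)
}
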
cfb4f9159e..db2563edab 100644 --- a/pkg/query-service/interfaces/interface.go +++ b/pkg/query-service/interfaces/interface.go @@ -8,18 +8,11 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/stats" - am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager" "go.signoz.io/signoz/pkg/query-service/model" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" ) type Reader interface { - GetChannel(id string) (*model.ChannelItem, *model.ApiError) - GetChannels() (*[]model.ChannelItem, *model.ApiError) - DeleteChannel(id string) *model.ApiError - CreateChannel(receiver *am.Receiver) (*am.Receiver, *model.ApiError) - EditChannel(receiver *am.Receiver, id string) (*am.Receiver, *model.ApiError) - GetInstantQueryMetricsResult(ctx context.Context, query *model.InstantQueryMetricsParams) (*promql.Result, *stats.QueryStats, *model.ApiError) GetQueryRangeResult(ctx context.Context, query *model.QueryRangeParams) (*promql.Result, *stats.QueryStats, *model.ApiError) GetServiceOverview(ctx context.Context, query *model.GetServiceOverviewParams, skipConfig *model.SkipConfig) (*[]model.ServiceOverviewItem, *model.ApiError) @@ -58,8 +51,6 @@ type Reader interface { SetTTL(ctx context.Context, ttlParams *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) FetchTemporality(ctx context.Context, metricNames []string) (map[string]map[v3.Temporality]bool, error) - GetMetricResult(ctx context.Context, query string) ([]*model.Series, error) - GetMetricResultEE(ctx context.Context, query string) ([]*model.Series, string, error) GetMetricAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error) GetMetricAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) GetMetricAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) @@ -70,10 +61,9 @@ type Reader interface { // QB V3 metrics/traces/logs GetTimeSeriesResultV3(ctx context.Context, query string) ([]*v3.Series, error) GetListResultV3(ctx context.Context, query string) ([]*v3.Row, error) - LiveTailLogsV3(ctx context.Context, query string, timestampStart uint64, idStart string, client *v3.LogsLiveTailClient) + LiveTailLogsV3(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClient) + LiveTailLogsV4(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClientV2) - GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error) - GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error) GetTotalSpans(ctx context.Context) (uint64, error) GetTotalLogs(ctx context.Context) (uint64, error) GetTotalSamples(ctx context.Context) (uint64, error) @@ -92,7 +82,6 @@ type Reader interface { GetLogAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) GetLogAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) GetLogAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error) - GetUsers(ctx context.Context) ([]model.UserPayload, error) GetQBFilterSuggestionsForLogs( ctx context.Context, req *v3.QBFilterSuggestionsRequest, @@ -108,25 +97,25 @@ type Reader interface { GetMetricMetadata(context.Context, string, string) 
(*v3.MetricMetadataResponse, error) - AddRuleStateHistory(ctx context.Context, ruleStateHistory []v3.RuleStateHistory) error - GetOverallStateTransitions(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.ReleStateItem, error) - ReadRuleStateHistoryByRuleID(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.RuleStateTimeline, error) - GetTotalTriggers(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (uint64, error) - GetTriggersByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error) - GetAvgResolutionTime(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (float64, error) - GetAvgResolutionTimeByInterval(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) (*v3.Series, error) - ReadRuleStateHistoryTopContributorsByRuleID(ctx context.Context, ruleID string, params *v3.QueryRuleStateHistory) ([]v3.RuleStateHistoryContributor, error) - GetLastSavedRuleStateHistory(ctx context.Context, ruleID string) ([]v3.RuleStateHistory, error) + AddRuleStateHistory(ctx context.Context, ruleStateHistory []model.RuleStateHistory) error + GetOverallStateTransitions(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) ([]model.ReleStateItem, error) + ReadRuleStateHistoryByRuleID(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*model.RuleStateTimeline, error) + GetTotalTriggers(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (uint64, error) + GetTriggersByInterval(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*v3.Series, error) + GetAvgResolutionTime(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (float64, error) + GetAvgResolutionTimeByInterval(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) (*v3.Series, error) + ReadRuleStateHistoryTopContributorsByRuleID(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) ([]model.RuleStateHistoryContributor, error) + GetLastSavedRuleStateHistory(ctx context.Context, ruleID string) ([]model.RuleStateHistory, error) GetMinAndMaxTimestampForTraceID(ctx context.Context, traceID []string) (int64, int64, error) // Query Progress tracking helpers. ReportQueryStartForProgressTracking(queryId string) (reportQueryFinished func(), err *model.ApiError) - SubscribeToQueryProgress(queryId string) (<-chan v3.QueryProgress, func(), *model.ApiError) + SubscribeToQueryProgress(queryId string) (<-chan model.QueryProgress, func(), *model.ApiError) } type Querier interface { - QueryRange(context.Context, *v3.QueryRangeParamsV3, map[string]v3.AttributeKey) ([]*v3.Result, map[string]error, error) + QueryRange(context.Context, *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) // test helpers QueriesExecuted() []string diff --git a/pkg/query-service/model/alerting.go b/pkg/query-service/model/alerting.go index 4d54f6ae34..944efecc12 100644 --- a/pkg/query-service/model/alerting.go +++ b/pkg/query-service/model/alerting.go @@ -3,8 +3,10 @@ package model import ( "database/sql/driver" "encoding/json" + "fmt" "github.com/pkg/errors" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" ) // AlertState denotes the state of an active alert. 
@@ -88,3 +90,104 @@ func (s *AlertState) Scan(value interface{}) error { func (s *AlertState) Value() (driver.Value, error) { return s.String(), nil } + +type LabelsString string + +func (l *LabelsString) MarshalJSON() ([]byte, error) { + lbls := make(map[string]string) + err := json.Unmarshal([]byte(*l), &lbls) + if err != nil { + return nil, err + } + return json.Marshal(lbls) +} + +func (l *LabelsString) Scan(src interface{}) error { + if data, ok := src.(string); ok { + *l = LabelsString(data) + } + return nil +} + +func (l LabelsString) String() string { + return string(l) +} + +type RuleStateTimeline struct { + Items []RuleStateHistory `json:"items"` + Total uint64 `json:"total"` + Labels map[string][]string `json:"labels"` +} + +type RuleStateHistory struct { + RuleID string `json:"ruleID" ch:"rule_id"` + RuleName string `json:"ruleName" ch:"rule_name"` + // One of ["normal", "firing"] + OverallState AlertState `json:"overallState" ch:"overall_state"` + OverallStateChanged bool `json:"overallStateChanged" ch:"overall_state_changed"` + // One of ["normal", "firing", "no_data", "muted"] + State AlertState `json:"state" ch:"state"` + StateChanged bool `json:"stateChanged" ch:"state_changed"` + UnixMilli int64 `json:"unixMilli" ch:"unix_milli"` + Labels LabelsString `json:"labels" ch:"labels"` + Fingerprint uint64 `json:"fingerprint" ch:"fingerprint"` + Value float64 `json:"value" ch:"value"` + + RelatedTracesLink string `json:"relatedTracesLink"` + RelatedLogsLink string `json:"relatedLogsLink"` +} + +type QueryRuleStateHistory struct { + Start int64 `json:"start"` + End int64 `json:"end"` + State string `json:"state"` + Filters *v3.FilterSet `json:"filters"` + Offset int64 `json:"offset"` + Limit int64 `json:"limit"` + Order string `json:"order"` +} + +func (r *QueryRuleStateHistory) Validate() error { + if r.Start == 0 || r.End == 0 { + return fmt.Errorf("start and end are required") + } + if r.Offset < 0 || r.Limit < 0 { + return fmt.Errorf("offset and limit must not be negative") + } + if r.Order != "asc" && r.Order != "desc" { + return fmt.Errorf("order must be asc or desc") + } + return nil +} + +type RuleStateHistoryContributor struct { + Fingerprint uint64 `json:"fingerprint" ch:"fingerprint"` + Labels LabelsString `json:"labels" ch:"labels"` + Count uint64 `json:"count" ch:"count"` + RelatedTracesLink string `json:"relatedTracesLink"` + RelatedLogsLink string `json:"relatedLogsLink"` +} + +type RuleStateTransition struct { + RuleID string `json:"ruleID" ch:"rule_id"` + State AlertState `json:"state" ch:"state"` + FiringTime int64 `json:"firingTime" ch:"firing_time"` + ResolutionTime int64 `json:"resolutionTime" ch:"resolution_time"` +} + +type ReleStateItem struct { + State AlertState `json:"state"` + Start int64 `json:"start"` + End int64 `json:"end"` +} + +type Stats struct { + TotalCurrentTriggers uint64 `json:"totalCurrentTriggers"` + TotalPastTriggers uint64 `json:"totalPastTriggers"` + CurrentTriggersSeries *v3.Series `json:"currentTriggersSeries"` + PastTriggersSeries *v3.Series `json:"pastTriggersSeries"` + CurrentAvgResolutionTime string `json:"currentAvgResolutionTime"` + PastAvgResolutionTime string `json:"pastAvgResolutionTime"` + CurrentAvgResolutionTimeSeries *v3.Series `json:"currentAvgResolutionTimeSeries"` + PastAvgResolutionTimeSeries *v3.Series `json:"pastAvgResolutionTimeSeries"` +} diff --git a/pkg/query-service/model/logs.go b/pkg/query-service/model/logs.go new file mode 100644 index 0000000000..0319581516 --- /dev/null +++
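A quick illustration of the `LabelsString` type added above: it holds labels as the raw JSON string ClickHouse returns (via `Scan`), but its custom `MarshalJSON` re-emits them as a structured JSON object in API responses. A minimal sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.signoz.io/signoz/pkg/query-service/model"
)

func main() {
	h := model.RuleStateHistory{
		RuleID: "rule-1",
		Labels: model.LabelsString(`{"service.name":"api","severity_text":"ERROR"}`),
	}

	// Marshal via a pointer so the pointer-receiver MarshalJSON on
	// *LabelsString is picked up for the Labels field.
	out, err := json.Marshal(&h)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// ..."labels":{"service.name":"api","severity_text":"ERROR"}...
	// i.e. a nested object rather than an escaped string.
}
```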
b/pkg/query-service/model/logs.go @@ -0,0 +1,75 @@ +package model + +import ( + "context" + "strings" + + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" +) + +type LogsLiveTailClientV2 struct { + Name string + Logs chan *SignozLogV2 + Done chan *bool + Error chan error +} + +type LogsLiveTailClient struct { + Name string + Logs chan *SignozLog + Done chan *bool + Error chan error +} + +type QueryProgress struct { + ReadRows uint64 `json:"read_rows"` + + ReadBytes uint64 `json:"read_bytes"` + + ElapsedMs uint64 `json:"elapsed_ms"` +} + +func GetLogFieldsV3(ctx context.Context, queryRangeParams *v3.QueryRangeParamsV3, fields *GetFieldsResponse) map[string]v3.AttributeKey { + data := map[string]v3.AttributeKey{} + for _, query := range queryRangeParams.CompositeQuery.BuilderQueries { + if query.DataSource == v3.DataSourceLogs { + + // top-level field metadata will always be present in the frontend (support for other fields can be added as an enhancement) + getType := func(t string) (v3.AttributeKeyType, bool) { + if t == "attributes" { + return v3.AttributeKeyTypeTag, false + } else if t == "resources" { + return v3.AttributeKeyTypeResource, false + } + return "", true + } + + for _, selectedField := range fields.Selected { + fieldType, pass := getType(selectedField.Type) + if pass { + continue + } + data[selectedField.Name] = v3.AttributeKey{ + Key: selectedField.Name, + Type: fieldType, + DataType: v3.AttributeKeyDataType(strings.ToLower(selectedField.DataType)), + IsColumn: true, + } + } + for _, interestingField := range fields.Interesting { + fieldType, pass := getType(interestingField.Type) + if pass { + continue + } + data[interestingField.Name] = v3.AttributeKey{ + Key: interestingField.Name, + Type: fieldType, + DataType: v3.AttributeKeyDataType(strings.ToLower(interestingField.DataType)), + IsColumn: false, + } + } + break + } + } + return data +} diff --git a/pkg/query-service/model/queryParams.go b/pkg/query-service/model/queryParams.go index 321f7417be..342f8f10f0 100644 --- a/pkg/query-service/model/queryParams.go +++ b/pkg/query-service/model/queryParams.go @@ -18,110 +18,6 @@ type QueryRangeParams struct { Stats string } -type MetricQuery struct { - QueryName string `json:"queryName"` - MetricName string `json:"metricName"` - TagFilters *FilterSet `json:"tagFilters,omitempty"` - GroupingTags []string `json:"groupBy,omitempty"` - AggregateOperator AggregateOperator `json:"aggregateOperator"` - Expression string `json:"expression"` - Disabled bool `json:"disabled"` - ReduceTo ReduceToOperator `json:"reduceTo,omitempty"` -} - -type ReduceToOperator int - -const ( - _ ReduceToOperator = iota - RLAST - RSUM - RAVG - RMAX - RMIN -) - -type QueryType int - -const ( - _ QueryType = iota - QUERY_BUILDER - CLICKHOUSE - PROM -) - -type PromQuery struct { - Query string `json:"query"` - Stats string `json:"stats,omitempty"` - Disabled bool `json:"disabled"` -} - -type ClickHouseQuery struct { - Query string `json:"query"` - Disabled bool `json:"disabled"` -} - -type PanelType int - -const ( - _ PanelType = iota - TIME_SERIES - QUERY_VALUE -) - -type CompositeMetricQuery struct { - BuilderQueries map[string]*MetricQuery `json:"builderQueries,omitempty"` - ClickHouseQueries map[string]*ClickHouseQuery `json:"chQueries,omitempty"` - PromQueries map[string]*PromQuery `json:"promQueries,omitempty"` - PanelType PanelType `json:"panelType"` - QueryType QueryType `json:"queryType"` -} - -type AggregateOperator int - -const ( - _ AggregateOperator = iota - NOOP - COUNT - COUNT_DISTINCT - SUM - AVG - MAX - MIN
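To make the `GetLogFieldsV3` mapping above concrete, here is a sketch with invented field names, assuming the `LogField`/`GetFieldsResponse` shapes used by the logs fields API: selected fields come back with `IsColumn: true`, interesting fields with `IsColumn: false`, and anything that is neither an attribute nor a resource is skipped by `getType`.

```go
package main

import (
	"context"
	"fmt"

	"go.signoz.io/signoz/pkg/query-service/model"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

func main() {
	params := &v3.QueryRangeParamsV3{
		CompositeQuery: &v3.CompositeQuery{
			BuilderQueries: map[string]*v3.BuilderQuery{
				"A": {QueryName: "A", DataSource: v3.DataSourceLogs},
			},
		},
	}
	fields := &model.GetFieldsResponse{
		Selected: []model.LogField{
			{Name: "method", Type: "attributes", DataType: "String"}, // -> IsColumn: true
		},
		Interesting: []model.LogField{
			{Name: "k8s.pod.name", Type: "resources", DataType: "String"}, // -> IsColumn: false
			{Name: "trace_id", Type: "static", DataType: "String"},        // skipped by getType
		},
	}

	for name, key := range model.GetLogFieldsV3(context.Background(), params, fields) {
		fmt.Printf("%s: type=%s dataType=%s column=%v\n", name, key.Type, key.DataType, key.IsColumn)
	}
	// method: type=tag dataType=string column=true
	// k8s.pod.name: type=resource dataType=string column=false
}
```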
- P05 - P10 - P20 - P25 - P50 - P75 - P90 - P95 - P99 - RATE - SUM_RATE - // leave blank space for possily {AVG, X}_RATE - _ - _ - _ - RATE_SUM - RATE_AVG - RATE_MAX - RATE_MIN - HIST_QUANTILE_50 - HIST_QUANTILE_75 - HIST_QUANTILE_90 - HIST_QUANTILE_95 - HIST_QUANTILE_99 -) - -type DataSource int - -const ( - _ DataSource = iota - METRICS - TRACES - LOGS -) - const ( StringTagMapCol = "stringTagMap" NumberTagMapCol = "numberTagMap" @@ -129,16 +25,6 @@ const ( ResourceTagMapCol = "resourceTagsMap" ) -type QueryRangeParamsV2 struct { - DataSource DataSource `json:"dataSource"` - Start int64 `json:"start"` - End int64 `json:"end"` - Step int64 `json:"step"` - CompositeMetricQuery *CompositeMetricQuery `json:"compositeMetricQuery"` - Variables map[string]interface{} `json:"variables,omitempty"` - NoCache bool `json:"noCache"` -} - type DashboardVars struct { Query string `json:"query"` Variables map[string]interface{} `json:"variables,omitempty"` diff --git a/pkg/query-service/model/response.go b/pkg/query-service/model/response.go index 83df872175..03e538879c 100644 --- a/pkg/query-service/model/response.go +++ b/pkg/query-service/model/response.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "math" - "sort" "strconv" "time" @@ -79,7 +78,7 @@ func BadRequest(err error) *ApiError { func BadRequestStr(s string) *ApiError { return &ApiError{ Typ: ErrorBadData, - Err: fmt.Errorf(s), + Err: errors.New(s), } } @@ -500,46 +499,12 @@ type NextPrevErrorIDs struct { GroupID string `json:"groupID"` } -type Series struct { - QueryName string `json:"queryName"` - Labels map[string]string `json:"metric"` - Points []MetricPoint `json:"values"` -} - -func (s *Series) SortPoints() { - sort.Slice(s.Points, func(i, j int) bool { - return s.Points[i].Timestamp < s.Points[j].Timestamp - }) -} - -type MetricPoint struct { - Timestamp int64 - Value float64 -} - type MetricStatus struct { MetricName string LastReceivedTsMillis int64 LastReceivedLabels map[string]string } -// MarshalJSON implements json.Marshaler. -func (p *MetricPoint) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(p.Value, 'f', -1, 64) - return json.Marshal([...]interface{}{float64(p.Timestamp) / 1000, v}) -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (p *MetricPoint) UnmarshalJSON(b []byte) error { - var a [2]interface{} - if err := json.Unmarshal(b, &a); err != nil { - return err - } - p.Timestamp = int64(a[0].(float64) * 1000) - p.Value, _ = strconv.ParseFloat(a[1].(string), 64) - return nil -} - type ShowCreateTableStatement struct { Statement string `json:"statement" ch:"statement"` } @@ -572,6 +537,21 @@ type SignozLog struct { Attributes_bool map[string]bool `json:"attributes_bool" ch:"attributes_bool"` } +type SignozLogV2 struct { + Timestamp uint64 `json:"timestamp" ch:"timestamp"` + ID string `json:"id" ch:"id"` + TraceID string `json:"trace_id" ch:"trace_id"` + SpanID string `json:"span_id" ch:"span_id"` + TraceFlags uint32 `json:"trace_flags" ch:"trace_flags"` + SeverityText string `json:"severity_text" ch:"severity_text"` + SeverityNumber uint8 `json:"severity_number" ch:"severity_number"` + Body string `json:"body" ch:"body"` + Resources_string map[string]string `json:"resources_string" ch:"resources_string"` + Attributes_string map[string]string `json:"attributes_string" ch:"attributes_string"` + Attributes_number map[string]float64 `json:"attributes_float" ch:"attributes_number"` + Attributes_bool map[string]bool `json:"attributes_bool" ch:"attributes_bool"` +} + type LogsTailClient struct { Name string Logs chan *SignozLog @@ -638,6 +618,7 @@ type AlertsInfo struct { LogsBasedAlerts int `json:"logsBasedAlerts"` MetricBasedAlerts int `json:"metricBasedAlerts"` TracesBasedAlerts int `json:"tracesBasedAlerts"` + TotalChannels int `json:"totalChannels"` SlackChannels int `json:"slackChannels"` WebHookChannels int `json:"webHookChannels"` PagerDutyChannels int `json:"pagerDutyChannels"` @@ -650,6 +631,7 @@ type AlertsInfo struct { SpanMetricsPrometheusQueries int `json:"spanMetricsPrometheusQueries"` AlertNames []string `json:"alertNames"` AlertsWithTSV2 int `json:"alertsWithTSv2"` + AlertsWithLogsChQuery int `json:"alertsWithLogsChQuery"` } type SavedViewsInfo struct { @@ -666,6 +648,7 @@ type DashboardsInfo struct { TracesBasedPanels int `json:"tracesBasedPanels"` DashboardNames []string `json:"dashboardNames"` QueriesWithTSV2 int `json:"queriesWithTSV2"` + DashboardsWithLogsChQuery int `json:"dashboardsWithLogsChQuery"` } type TagTelemetryData struct { diff --git a/pkg/query-service/model/v3/v3.go b/pkg/query-service/model/v3/v3.go index c21d47229c..2d99118533 100644 --- a/pkg/query-service/model/v3/v3.go +++ b/pkg/query-service/model/v3/v3.go @@ -11,7 +11,6 @@ import ( "github.com/google/uuid" "github.com/pkg/errors" - "go.signoz.io/signoz/pkg/query-service/model" ) type DataSource string @@ -371,6 +370,22 @@ type QueryRangeParamsV3 struct { FormatForWeb bool `json:"formatForWeb,omitempty"` } +func (q *QueryRangeParamsV3) Clone() *QueryRangeParamsV3 { + if q == nil { + return nil + } + return &QueryRangeParamsV3{ + Start: q.Start, + End: q.End, + Step: q.Step, + CompositeQuery: q.CompositeQuery.Clone(), + Variables: q.Variables, + NoCache: q.NoCache, + Version: q.Version, + FormatForWeb: q.FormatForWeb, + } +} + type PromQuery struct { Query string `json:"query"` Stats string `json:"stats,omitempty"` @@ -378,6 +393,18 @@ type PromQuery struct { Legend string `json:"legend,omitempty"` } +func (p *PromQuery) Clone() *PromQuery { + if p == nil { + return nil + } + return &PromQuery{ + Query: p.Query, + Stats: p.Stats, + Disabled: p.Disabled, + Legend: p.Legend, + } +} + func (p *PromQuery) Validate() error { if p == nil { return nil @@ -396,6 +423,16 @@ type ClickHouseQuery struct { Legend string 
`json:"legend,omitempty"` } +func (c *ClickHouseQuery) Clone() *ClickHouseQuery { + if c == nil { + return nil + } + return &ClickHouseQuery{ + Query: c.Query, + Disabled: c.Disabled, + Legend: c.Legend, + } +} func (c *ClickHouseQuery) Validate() error { if c == nil { return nil @@ -421,6 +458,43 @@ type CompositeQuery struct { FillGaps bool `json:"fillGaps,omitempty"` } +func (c *CompositeQuery) Clone() *CompositeQuery { + if c == nil { + return nil + } + var builderQueries map[string]*BuilderQuery + if c.BuilderQueries != nil { + builderQueries = make(map[string]*BuilderQuery) + for name, query := range c.BuilderQueries { + builderQueries[name] = query.Clone() + } + } + var clickHouseQueries map[string]*ClickHouseQuery + if c.ClickHouseQueries != nil { + clickHouseQueries = make(map[string]*ClickHouseQuery) + for name, query := range c.ClickHouseQueries { + clickHouseQueries[name] = query.Clone() + } + } + var promQueries map[string]*PromQuery + if c.PromQueries != nil { + promQueries = make(map[string]*PromQuery) + for name, query := range c.PromQueries { + promQueries[name] = query.Clone() + } + } + return &CompositeQuery{ + BuilderQueries: builderQueries, + ClickHouseQueries: clickHouseQueries, + PromQueries: promQueries, + PanelType: c.PanelType, + QueryType: c.QueryType, + Unit: c.Unit, + FillGaps: c.FillGaps, + } + +} + func (c *CompositeQuery) EnabledQueries() int { count := 0 switch c.QueryType { @@ -646,6 +720,7 @@ const ( FunctionNameMedian5 FunctionName = "median5" FunctionNameMedian7 FunctionName = "median7" FunctionNameTimeShift FunctionName = "timeShift" + FunctionNameAnomaly FunctionName = "anomaly" ) func (f FunctionName) Validate() error { @@ -665,7 +740,8 @@ func (f FunctionName) Validate() error { FunctionNameMedian3, FunctionNameMedian5, FunctionNameMedian7, - FunctionNameTimeShift: + FunctionNameTimeShift, + FunctionNameAnomaly: return nil default: return fmt.Errorf("invalid function name: %s", f) @@ -673,33 +749,68 @@ func (f FunctionName) Validate() error { } type Function struct { - Name FunctionName `json:"name"` - Args []interface{} `json:"args,omitempty"` + Name FunctionName `json:"name"` + Args []interface{} `json:"args,omitempty"` + NamedArgs map[string]interface{} `json:"namedArgs,omitempty"` } type BuilderQuery struct { - QueryName string `json:"queryName"` - StepInterval int64 `json:"stepInterval"` - DataSource DataSource `json:"dataSource"` - AggregateOperator AggregateOperator `json:"aggregateOperator"` - AggregateAttribute AttributeKey `json:"aggregateAttribute,omitempty"` - Temporality Temporality `json:"temporality,omitempty"` - Filters *FilterSet `json:"filters,omitempty"` - GroupBy []AttributeKey `json:"groupBy,omitempty"` - Expression string `json:"expression"` - Disabled bool `json:"disabled"` - Having []Having `json:"having,omitempty"` - Legend string `json:"legend,omitempty"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - PageSize uint64 `json:"pageSize"` - OrderBy []OrderBy `json:"orderBy,omitempty"` - ReduceTo ReduceToOperator `json:"reduceTo,omitempty"` - SelectColumns []AttributeKey `json:"selectColumns,omitempty"` - TimeAggregation TimeAggregation `json:"timeAggregation,omitempty"` - SpaceAggregation SpaceAggregation `json:"spaceAggregation,omitempty"` - Functions []Function `json:"functions,omitempty"` - ShiftBy int64 + QueryName string `json:"queryName"` + StepInterval int64 `json:"stepInterval"` + DataSource DataSource `json:"dataSource"` + AggregateOperator AggregateOperator `json:"aggregateOperator"` + 
AggregateAttribute AttributeKey `json:"aggregateAttribute,omitempty"` + Temporality Temporality `json:"temporality,omitempty"` + Filters *FilterSet `json:"filters,omitempty"` + GroupBy []AttributeKey `json:"groupBy,omitempty"` + Expression string `json:"expression"` + Disabled bool `json:"disabled"` + Having []Having `json:"having,omitempty"` + Legend string `json:"legend,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + PageSize uint64 `json:"pageSize"` + OrderBy []OrderBy `json:"orderBy,omitempty"` + ReduceTo ReduceToOperator `json:"reduceTo,omitempty"` + SelectColumns []AttributeKey `json:"selectColumns,omitempty"` + TimeAggregation TimeAggregation `json:"timeAggregation,omitempty"` + SpaceAggregation SpaceAggregation `json:"spaceAggregation,omitempty"` + Functions []Function `json:"functions,omitempty"` + ShiftBy int64 + IsAnomaly bool + QueriesUsedInFormula []string +} + +func (b *BuilderQuery) Clone() *BuilderQuery { + if b == nil { + return nil + } + return &BuilderQuery{ + QueryName: b.QueryName, + StepInterval: b.StepInterval, + DataSource: b.DataSource, + AggregateOperator: b.AggregateOperator, + AggregateAttribute: b.AggregateAttribute, + Temporality: b.Temporality, + Filters: b.Filters.Clone(), + GroupBy: b.GroupBy, + Expression: b.Expression, + Disabled: b.Disabled, + Having: b.Having, + Legend: b.Legend, + Limit: b.Limit, + Offset: b.Offset, + PageSize: b.PageSize, + OrderBy: b.OrderBy, + ReduceTo: b.ReduceTo, + SelectColumns: b.SelectColumns, + TimeAggregation: b.TimeAggregation, + SpaceAggregation: b.SpaceAggregation, + Functions: b.Functions, + ShiftBy: b.ShiftBy, + IsAnomaly: b.IsAnomaly, + QueriesUsedInFormula: b.QueriesUsedInFormula, + } } // CanDefaultZero returns true if the missing value can be substituted by zero @@ -878,6 +989,16 @@ type FilterSet struct { Items []FilterItem `json:"items"` } +func (f *FilterSet) Clone() *FilterSet { + if f == nil { + return nil + } + return &FilterSet{ + Operator: f.Operator, + Items: f.Items, + } +} + func (f *FilterSet) Validate() error { if f == nil { return nil @@ -1029,17 +1150,15 @@ type Table struct { } type Result struct { - QueryName string `json:"queryName,omitempty"` - Series []*Series `json:"series,omitempty"` - List []*Row `json:"list,omitempty"` - Table *Table `json:"table,omitempty"` -} - -type LogsLiveTailClient struct { - Name string - Logs chan *model.SignozLog - Done chan *bool - Error chan error + QueryName string `json:"queryName,omitempty"` + Series []*Series `json:"series,omitempty"` + PredictedSeries []*Series `json:"predictedSeries,omitempty"` + UpperBoundSeries []*Series `json:"upperBoundSeries,omitempty"` + LowerBoundSeries []*Series `json:"lowerBoundSeries,omitempty"` + AnomalyScores []*Series `json:"anomalyScores,omitempty"` + List []*Row `json:"list,omitempty"` + Table *Table `json:"table,omitempty"` + IsAnomaly bool `json:"isAnomaly,omitempty"` } type Series struct { @@ -1160,115 +1279,6 @@ type MetricMetadataResponse struct { Temporality string `json:"temporality"` } -type LabelsString string - -func (l *LabelsString) MarshalJSON() ([]byte, error) { - lbls := make(map[string]string) - err := json.Unmarshal([]byte(*l), &lbls) - if err != nil { - return nil, err - } - return json.Marshal(lbls) -} - -func (l *LabelsString) Scan(src interface{}) error { - if data, ok := src.(string); ok { - *l = LabelsString(data) - } - return nil -} - -func (l LabelsString) String() string { - return string(l) -} - -type RuleStateTimeline struct { - Items []RuleStateHistory `json:"items"` - 
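The family of `Clone` methods above exists so a caller can re-issue the same query over a shifted window (as the anomaly providers do for seasonal baselines) without mutating shared request state. Note they are deep for the query tree but shallow for maps and slices (`Variables`, `GroupBy`, `Items` share backing storage), so reassigning fields on a clone is safe while mutating shared slices in place is not. A hypothetical helper to show the intent:

```go
package sketch

import (
	"time"

	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
)

// shiftedParams is an invented name: it returns a copy of req whose
// window is moved one day back, leaving the caller's params untouched.
func shiftedParams(req *v3.QueryRangeParamsV3) *v3.QueryRangeParamsV3 {
	oneDayMs := 24 * time.Hour.Milliseconds()

	shifted := req.Clone()
	shifted.Start -= oneDayMs
	shifted.End -= oneDayMs

	// Safe: CompositeQuery and each BuilderQuery were cloned recursively,
	// so field reassignments here do not leak back into req.
	for _, q := range shifted.CompositeQuery.BuilderQueries {
		q.Legend = ""
	}
	return shifted
}
```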
Total uint64 `json:"total"` - Labels map[string][]string `json:"labels"` -} - -type RuleStateHistory struct { - RuleID string `json:"ruleID" ch:"rule_id"` - RuleName string `json:"ruleName" ch:"rule_name"` - // One of ["normal", "firing"] - OverallState model.AlertState `json:"overallState" ch:"overall_state"` - OverallStateChanged bool `json:"overallStateChanged" ch:"overall_state_changed"` - // One of ["normal", "firing", "no_data", "muted"] - State model.AlertState `json:"state" ch:"state"` - StateChanged bool `json:"stateChanged" ch:"state_changed"` - UnixMilli int64 `json:"unixMilli" ch:"unix_milli"` - Labels LabelsString `json:"labels" ch:"labels"` - Fingerprint uint64 `json:"fingerprint" ch:"fingerprint"` - Value float64 `json:"value" ch:"value"` - - RelatedTracesLink string `json:"relatedTracesLink"` - RelatedLogsLink string `json:"relatedLogsLink"` -} - -type QueryRuleStateHistory struct { - Start int64 `json:"start"` - End int64 `json:"end"` - State string `json:"state"` - Filters *FilterSet `json:"filters"` - Offset int64 `json:"offset"` - Limit int64 `json:"limit"` - Order string `json:"order"` -} - -func (r *QueryRuleStateHistory) Validate() error { - if r.Start == 0 || r.End == 0 { - return fmt.Errorf("start and end are required") - } - if r.Offset < 0 || r.Limit < 0 { - return fmt.Errorf("offset and limit must be greater than 0") - } - if r.Order != "asc" && r.Order != "desc" { - return fmt.Errorf("order must be asc or desc") - } - return nil -} - -type RuleStateHistoryContributor struct { - Fingerprint uint64 `json:"fingerprint" ch:"fingerprint"` - Labels LabelsString `json:"labels" ch:"labels"` - Count uint64 `json:"count" ch:"count"` - RelatedTracesLink string `json:"relatedTracesLink"` - RelatedLogsLink string `json:"relatedLogsLink"` -} - -type RuleStateTransition struct { - RuleID string `json:"ruleID" ch:"rule_id"` - State model.AlertState `json:"state" ch:"state"` - FiringTime int64 `json:"firingTime" ch:"firing_time"` - ResolutionTime int64 `json:"resolutionTime" ch:"resolution_time"` -} - -type ReleStateItem struct { - State model.AlertState `json:"state"` - Start int64 `json:"start"` - End int64 `json:"end"` -} - -type Stats struct { - TotalCurrentTriggers uint64 `json:"totalCurrentTriggers"` - TotalPastTriggers uint64 `json:"totalPastTriggers"` - CurrentTriggersSeries *Series `json:"currentTriggersSeries"` - PastTriggersSeries *Series `json:"pastTriggersSeries"` - CurrentAvgResolutionTime string `json:"currentAvgResolutionTime"` - PastAvgResolutionTime string `json:"pastAvgResolutionTime"` - CurrentAvgResolutionTimeSeries *Series `json:"currentAvgResolutionTimeSeries"` - PastAvgResolutionTimeSeries *Series `json:"pastAvgResolutionTimeSeries"` -} - -type QueryProgress struct { - ReadRows uint64 `json:"read_rows"` - - ReadBytes uint64 `json:"read_bytes"` - - ElapsedMs uint64 `json:"elapsed_ms"` -} - type URLShareableTimeRange struct { Start int64 `json:"start"` End int64 `json:"end"` diff --git a/pkg/query-service/rules/alerting.go b/pkg/query-service/rules/alerting.go index f6826ed3d8..cb5205f99e 100644 --- a/pkg/query-service/rules/alerting.go +++ b/pkg/query-service/rules/alerting.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/url" + "sort" "strings" "time" @@ -124,6 +125,47 @@ type RuleCondition struct { SelectedQuery string `json:"selectedQueryName,omitempty"` } +func (rc *RuleCondition) GetSelectedQueryName() string { + if rc != nil { + if rc.SelectedQuery != "" { + return rc.SelectedQuery + } + + queryNames := map[string]struct{}{} + + if 
rc.CompositeQuery != nil { + if rc.QueryType() == v3.QueryTypeBuilder { + for name := range rc.CompositeQuery.BuilderQueries { + queryNames[name] = struct{}{} + } + } else if rc.QueryType() == v3.QueryTypeClickHouseSQL { + for name := range rc.CompositeQuery.ClickHouseQueries { + queryNames[name] = struct{}{} + } + } + } + + // The following logic exists for backward compatibility + // If there is no selected query, then + // - check if F1 is present, if yes, return F1 + // - else return the query with max ascii value + // this logic is not really correct. we should be considering + // whether the query is enabled or not. but this is a temporary + // fix to support backward compatibility + if _, ok := queryNames["F1"]; ok { + return "F1" + } + keys := make([]string, 0, len(queryNames)) + for k := range queryNames { + keys = append(keys, k) + } + sort.Strings(keys) + return keys[len(keys)-1] + } + // This should never happen + return "" +} + func (rc *RuleCondition) IsValid() bool { if rc.CompositeQuery == nil { diff --git a/pkg/query-service/rules/base_rule.go b/pkg/query-service/rules/base_rule.go index 492f6f685c..a108938b1d 100644 --- a/pkg/query-service/rules/base_rule.go +++ b/pkg/query-service/rules/base_rule.go @@ -109,6 +109,7 @@ func NewBaseRule(id string, p *PostableRule, reader interfaces.Reader, opts ...R id: id, name: p.AlertName, source: p.Source, + typ: p.AlertType, ruleCondition: p.RuleCondition, evalWindow: time.Duration(p.EvalWindow), labels: qslabels.FromMap(p.Labels), @@ -201,6 +202,21 @@ func (r *BaseRule) Unit() string { return "" } +func (r *BaseRule) Timestamps(ts time.Time) (time.Time, time.Time) { + start := ts.Add(-time.Duration(r.evalWindow)).UnixMilli() + end := ts.UnixMilli() + + if r.evalDelay > 0 { + start = start - int64(r.evalDelay.Milliseconds()) + end = end - int64(r.evalDelay.Milliseconds()) + } + // round to minute otherwise we could potentially miss data + start = start - (start % (60 * 1000)) + end = end - (end % (60 * 1000)) + + return time.UnixMilli(start), time.UnixMilli(end) +} + func (r *BaseRule) SetLastError(err error) { r.mtx.Lock() defer r.mtx.Unlock() @@ -471,9 +487,9 @@ func (r *BaseRule) shouldAlert(series v3.Series) (Sample, bool) { return alertSmpl, shouldAlert } -func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, currentState model.AlertState, itemsToAdd []v3.RuleStateHistory) error { +func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, currentState model.AlertState, itemsToAdd []model.RuleStateHistory) error { zap.L().Debug("recording rule state history", zap.String("ruleid", r.ID()), zap.Any("prevState", prevState), zap.Any("currentState", currentState), zap.Any("itemsToAdd", itemsToAdd)) - revisedItemsToAdd := map[uint64]v3.RuleStateHistory{} + revisedItemsToAdd := map[uint64]model.RuleStateHistory{} lastSavedState, err := r.reader.GetLastSavedRuleStateHistory(ctx, r.ID()) if err != nil { @@ -483,7 +499,7 @@ func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, curren // the state would reset so we need to add the corresponding state changes to previously saved states if !r.handledRestart && len(lastSavedState) > 0 { zap.L().Debug("handling restart", zap.String("ruleid", r.ID()), zap.Any("lastSavedState", lastSavedState)) - l := map[uint64]v3.RuleStateHistory{} + l := map[uint64]model.RuleStateHistory{} for _, item := range itemsToAdd { l[item.Fingerprint] = item } @@ -552,7 +568,7 @@ func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, curren if 
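Worked example for the `Timestamps` helper above: with `evalWindow = 5m`, `evalDelay = 2m`, and `ts = 10:07:30`, the raw window is `[10:02:30, 10:07:30]`, the delay shifts it to `[10:00:30, 10:05:30]`, and the minute rounding floors both ends to `[10:00:00, 10:05:00]`. A standalone sketch reproducing the same arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

// windowFor mirrors BaseRule.Timestamps: subtract the eval window,
// shift back by the eval delay, then floor both ends to the minute so
// partially ingested buckets are not queried.
func windowFor(ts time.Time, evalWindow, evalDelay time.Duration) (time.Time, time.Time) {
	start := ts.Add(-evalWindow).UnixMilli()
	end := ts.UnixMilli()
	if evalDelay > 0 {
		start -= evalDelay.Milliseconds()
		end -= evalDelay.Milliseconds()
	}
	start -= start % (60 * 1000)
	end -= end % (60 * 1000)
	return time.UnixMilli(start), time.UnixMilli(end)
}

func main() {
	ts := time.Date(2024, 1, 1, 10, 7, 30, 0, time.UTC)
	start, end := windowFor(ts, 5*time.Minute, 2*time.Minute)
	fmt.Println(start.UTC().Format("15:04:05"), "->", end.UTC().Format("15:04:05"))
	// Output: 10:00:00 -> 10:05:00
}
```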
len(revisedItemsToAdd) > 0 && r.reader != nil { zap.L().Debug("writing rule state history", zap.String("ruleid", r.ID()), zap.Any("revisedItemsToAdd", revisedItemsToAdd)) - entries := make([]v3.RuleStateHistory, 0, len(revisedItemsToAdd)) + entries := make([]model.RuleStateHistory, 0, len(revisedItemsToAdd)) for _, item := range revisedItemsToAdd { entries = append(entries, item) } diff --git a/pkg/query-service/rules/db.go b/pkg/query-service/rules/db.go index d9a9be195c..e6f8d6301c 100644 --- a/pkg/query-service/rules/db.go +++ b/pkg/query-service/rules/db.go @@ -11,6 +11,7 @@ import ( "github.com/jmoiron/sqlx" "go.signoz.io/signoz/pkg/query-service/auth" "go.signoz.io/signoz/pkg/query-service/common" + am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager" "go.signoz.io/signoz/pkg/query-service/model" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.uber.org/zap" @@ -18,6 +19,12 @@ import ( // Data store to capture user alert rule settings type RuleDB interface { + GetChannel(id string) (*model.ChannelItem, *model.ApiError) + GetChannels() (*[]model.ChannelItem, *model.ApiError) + DeleteChannel(id string) *model.ApiError + CreateChannel(receiver *am.Receiver) (*am.Receiver, *model.ApiError) + EditChannel(receiver *am.Receiver, id string) (*am.Receiver, *model.ApiError) + // CreateRuleTx stores rule in the db and returns tx and group name (on success) CreateRuleTx(ctx context.Context, rule string) (int64, Tx, error) @@ -68,13 +75,15 @@ type Tx interface { type ruleDB struct { *sqlx.DB + alertManager am.Manager } // todo: move init methods for creating tables -func NewRuleDB(db *sqlx.DB) RuleDB { +func NewRuleDB(db *sqlx.DB, alertManager am.Manager) RuleDB { return &ruleDB{ db, + alertManager, } } @@ -303,6 +312,229 @@ func (r *ruleDB) EditPlannedMaintenance(ctx context.Context, maintenance Planned return "", nil } +func getChannelType(receiver *am.Receiver) string { + + if receiver.EmailConfigs != nil { + return "email" + } + if receiver.OpsGenieConfigs != nil { + return "opsgenie" + } + if receiver.PagerdutyConfigs != nil { + return "pagerduty" + } + if receiver.PushoverConfigs != nil { + return "pushover" + } + if receiver.SNSConfigs != nil { + return "sns" + } + if receiver.SlackConfigs != nil { + return "slack" + } + if receiver.VictorOpsConfigs != nil { + return "victorops" + } + if receiver.WebhookConfigs != nil { + return "webhook" + } + if receiver.WechatConfigs != nil { + return "wechat" + } + if receiver.MSTeamsConfigs != nil { + return "msteams" + } + return "" +} + +func (r *ruleDB) GetChannel(id string) (*model.ChannelItem, *model.ApiError) { + + idInt, _ := strconv.Atoi(id) + channel := model.ChannelItem{} + + query := "SELECT id, created_at, updated_at, name, type, data data FROM notification_channels WHERE id=?;" + + stmt, err := r.Preparex(query) + + if err != nil { + zap.L().Error("Error in preparing sql query for GetChannel", zap.Error(err)) + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + + err = stmt.Get(&channel, idInt) + + if err != nil { + zap.L().Error("Error in getting channel with id", zap.Int("id", idInt), zap.Error(err)) + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + + return &channel, nil +} + +func (r *ruleDB) DeleteChannel(id string) *model.ApiError { + + idInt, _ := strconv.Atoi(id) + + channelToDelete, apiErrorObj := r.GetChannel(id) + + if apiErrorObj != nil { + return apiErrorObj + } + + tx, err := r.Begin() + if err != nil { + return &model.ApiError{Typ: model.ErrorInternal, Err: 
err} + } + + { + stmt, err := tx.Prepare(`DELETE FROM notification_channels WHERE id=$1;`) + if err != nil { + zap.L().Error("Error in preparing statement for DELETE from notification_channels", zap.Error(err)) + tx.Rollback() + return &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + defer stmt.Close() + + if _, err := stmt.Exec(idInt); err != nil { + zap.L().Error("Error in Executing prepared statement for DELETE from notification_channels", zap.Error(err)) + tx.Rollback() // return an error too, we may want to wrap them + return &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + } + + apiError := r.alertManager.DeleteRoute(channelToDelete.Name) + if apiError != nil { + tx.Rollback() + return apiError + } + + err = tx.Commit() + if err != nil { + zap.L().Error("Error in committing transaction for DELETE command to notification_channels", zap.Error(err)) + return &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + + return nil + +} + +func (r *ruleDB) GetChannels() (*[]model.ChannelItem, *model.ApiError) { + + channels := []model.ChannelItem{} + + query := "SELECT id, created_at, updated_at, name, type, data data FROM notification_channels" + + err := r.Select(&channels, query) + + zap.L().Info(query) + + if err != nil { + zap.L().Error("Error in processing sql query", zap.Error(err)) + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + + return &channels, nil + +} + +func (r *ruleDB) EditChannel(receiver *am.Receiver, id string) (*am.Receiver, *model.ApiError) { + + idInt, _ := strconv.Atoi(id) + + channel, apiErrObj := r.GetChannel(id) + + if apiErrObj != nil { + return nil, apiErrObj + } + if channel.Name != receiver.Name { + return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("channel name cannot be changed")} + } + + tx, err := r.Begin() + if err != nil { + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + + channel_type := getChannelType(receiver) + + receiverString, _ := json.Marshal(receiver) + + { + stmt, err := tx.Prepare(`UPDATE notification_channels SET updated_at=$1, type=$2, data=$3 WHERE id=$4;`) + + if err != nil { + zap.L().Error("Error in preparing statement for UPDATE to notification_channels", zap.Error(err)) + tx.Rollback() + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + defer stmt.Close() + + if _, err := stmt.Exec(time.Now(), channel_type, string(receiverString), idInt); err != nil { + zap.L().Error("Error in Executing prepared statement for UPDATE to notification_channels", zap.Error(err)) + tx.Rollback() // return an error too, we may want to wrap them + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + } + + apiError := r.alertManager.EditRoute(receiver) + if apiError != nil { + tx.Rollback() + return nil, apiError + } + + err = tx.Commit() + if err != nil { + zap.L().Error("Error in committing transaction for UPDATE to notification_channels", zap.Error(err)) + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + + return receiver, nil + +} + +func (r *ruleDB) CreateChannel(receiver *am.Receiver) (*am.Receiver, *model.ApiError) { + + channel_type := getChannelType(receiver) + + receiverString, _ := json.Marshal(receiver) + + tx, err := r.Begin() + if err != nil { + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + + { + stmt, err := tx.Prepare(`INSERT INTO notification_channels (created_at, updated_at, name, type, data) VALUES($1,$2,$3,$4,$5);`) + if err != nil { + zap.L().Error("Error in preparing statement for
INSERT to notification_channels", zap.Error(err)) + tx.Rollback() + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + defer stmt.Close() + + if _, err := stmt.Exec(time.Now(), time.Now(), receiver.Name, channel_type, string(receiverString)); err != nil { + zap.L().Error("Error in Executing prepared statement for INSERT to notification_channels", zap.Error(err)) + tx.Rollback() // return an error too, we may want to wrap them + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + } + + apiError := r.alertManager.AddRoute(receiver) + if apiError != nil { + tx.Rollback() + return nil, apiError + } + + err = tx.Commit() + if err != nil { + zap.L().Error("Error in committing transaction for INSERT to notification_channels", zap.Error(err)) + return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err} + } + + return receiver, nil + +} + func (r *ruleDB) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error) { alertsInfo := model.AlertsInfo{} // fetch alerts from rules db @@ -319,6 +551,10 @@ func (r *ruleDB) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error) { if strings.Contains(alert, "time_series_v2") { alertsInfo.AlertsWithTSV2 = alertsInfo.AlertsWithTSV2 + 1 } + if strings.Contains(alert, "signoz_logs.distributed_logs") || + strings.Contains(alert, "signoz_logs.logs") { + alertsInfo.AlertsWithLogsChQuery = alertsInfo.AlertsWithLogsChQuery + 1 + } err = json.Unmarshal([]byte(alert), &rule) if err != nil { zap.L().Error("invalid rule data", zap.Error(err)) @@ -349,5 +585,31 @@ func (r *ruleDB) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error) { alertsInfo.TotalAlerts = alertsInfo.TotalAlerts + 1 } alertsInfo.AlertNames = alertNames + + channels, _ := r.GetChannels() + if channels != nil { + alertsInfo.TotalChannels = len(*channels) + for _, channel := range *channels { + if channel.Type == "slack" { + alertsInfo.SlackChannels = alertsInfo.SlackChannels + 1 + } + if channel.Type == "webhook" { + alertsInfo.WebHookChannels = alertsInfo.WebHookChannels + 1 + } + if channel.Type == "email" { + alertsInfo.EmailChannels = alertsInfo.EmailChannels + 1 + } + if channel.Type == "pagerduty" { + alertsInfo.PagerDutyChannels = alertsInfo.PagerDutyChannels + 1 + } + if channel.Type == "opsgenie" { + alertsInfo.OpsGenieChannels = alertsInfo.OpsGenieChannels + 1 + } + if channel.Type == "msteams" { + alertsInfo.MSTeamsChannels = alertsInfo.MSTeamsChannels + 1 + } + } + } + return &alertsInfo, nil } diff --git a/pkg/query-service/rules/manager.go b/pkg/query-service/rules/manager.go index eaabc4f27a..89dec5f3d1 100644 --- a/pkg/query-service/rules/manager.go +++ b/pkg/query-service/rules/manager.go @@ -190,7 +190,12 @@ func NewManager(o *ManagerOptions) (*Manager, error) { return nil, err } - db := NewRuleDB(o.DBConn) + amManager, err := am.New() + if err != nil { + return nil, err + } + + db := NewRuleDB(o.DBConn, amManager) telemetry.GetInstance().SetAlertsInfoCallback(db.GetAlertsInfo) diff --git a/pkg/query-service/rules/prom_rule.go b/pkg/query-service/rules/prom_rule.go index 7136a88e97..db5a963731 100644 --- a/pkg/query-service/rules/prom_rule.go +++ b/pkg/query-service/rules/prom_rule.go @@ -219,7 +219,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) } - itemsToAdd := []v3.RuleStateHistory{} + itemsToAdd := []model.RuleStateHistory{} // Check if any pending alerts should be removed or fire now. Write out alert timeseries. 
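The channel mutations moved into `ruleDB` above all share one shape: stage the row change inside a SQL transaction, push the route change to alertmanager, and commit only if alertmanager accepted it. A condensed sketch of that ordering (`syncRoute` stands in for `r.alertManager.AddRoute`/`EditRoute`/`DeleteRoute`; it is not the real API):

```go
package sketch

import (
	"database/sql"
	"fmt"
)

// createChannelTx stages the insert, gates the commit on the
// alertmanager call, and rolls back if either step fails.
func createChannelTx(db *sql.DB, name, data string, syncRoute func() error) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	if _, err := tx.Exec(
		`INSERT INTO notification_channels (name, data) VALUES ($1, $2)`,
		name, data,
	); err != nil {
		tx.Rollback() // staged row is discarded
		return err
	}
	if err := syncRoute(); err != nil {
		tx.Rollback() // keep DB and alertmanager consistent
		return fmt.Errorf("alertmanager rejected route: %w", err)
	}
	return tx.Commit()
}
```

With this ordering a failed alertmanager call never leaves an orphaned row; the remaining gap (the commit itself failing after alertmanager accepted the route) is the same one the original code accepts.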
for fp, a := range r.active { @@ -236,13 +236,13 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) if a.State != model.StateInactive { a.State = model.StateInactive a.ResolvedAt = ts - itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{ + itemsToAdd = append(itemsToAdd, model.RuleStateHistory{ RuleID: r.ID(), RuleName: r.Name(), State: model.StateInactive, StateChanged: true, UnixMilli: ts.UnixMilli(), - Labels: v3.LabelsString(labelsJSON), + Labels: model.LabelsString(labelsJSON), Fingerprint: a.QueryResultLables.Hash(), }) } @@ -256,13 +256,13 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) if a.Missing { state = model.StateNoData } - itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{ + itemsToAdd = append(itemsToAdd, model.RuleStateHistory{ RuleID: r.ID(), RuleName: r.Name(), State: state, StateChanged: true, UnixMilli: ts.UnixMilli(), - Labels: v3.LabelsString(labelsJSON), + Labels: model.LabelsString(labelsJSON), Fingerprint: a.QueryResultLables.Hash(), Value: a.Value, }) diff --git a/pkg/query-service/rules/promrule_test.go b/pkg/query-service/rules/promrule_test.go index 7c559d1eee..c87ef2cee9 100644 --- a/pkg/query-service/rules/promrule_test.go +++ b/pkg/query-service/rules/promrule_test.go @@ -13,7 +13,7 @@ import ( func TestPromRuleShouldAlert(t *testing.T) { postableRule := PostableRule{ AlertName: "Test Rule", - AlertType: "METRIC_BASED_ALERT", + AlertType: AlertTypeMetric, RuleType: RuleTypeProm, EvalWindow: Duration(5 * time.Minute), Frequency: Duration(1 * time.Minute), diff --git a/pkg/query-service/rules/rule.go b/pkg/query-service/rules/rule.go index bb41a2be13..2b5b8d5aae 100644 --- a/pkg/query-service/rules/rule.go +++ b/pkg/query-service/rules/rule.go @@ -5,7 +5,6 @@ import ( "time" "go.signoz.io/signoz/pkg/query-service/model" - v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.signoz.io/signoz/pkg/query-service/utils/labels" ) @@ -35,7 +34,7 @@ type Rule interface { SetEvaluationTimestamp(time.Time) GetEvaluationTimestamp() time.Time - RecordRuleStateHistory(ctx context.Context, prevState, currentState model.AlertState, itemsToAdd []v3.RuleStateHistory) error + RecordRuleStateHistory(ctx context.Context, prevState, currentState model.AlertState, itemsToAdd []model.RuleStateHistory) error SendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) } diff --git a/pkg/query-service/rules/threshold_rule.go b/pkg/query-service/rules/threshold_rule.go index 964774500e..0f768314cf 100644 --- a/pkg/query-service/rules/threshold_rule.go +++ b/pkg/query-service/rules/threshold_rule.go @@ -6,9 +6,7 @@ import ( "encoding/json" "fmt" "math" - "net/url" "regexp" - "sort" "text/template" "time" "unicode" @@ -16,6 +14,7 @@ import ( "go.uber.org/zap" "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/contextlinks" "go.signoz.io/signoz/pkg/query-service/model" "go.signoz.io/signoz/pkg/query-service/postprocess" @@ -31,6 +30,7 @@ import ( "go.signoz.io/signoz/pkg/query-service/utils/timestamp" logsv3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3" + tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" "go.signoz.io/signoz/pkg/query-service/formatter" yaml "gopkg.in/yaml.v2" @@ -53,6 +53,10 @@ type ThresholdRule struct { querier interfaces.Querier // querierV2 is used for alerts created after the introduction of new metrics query builder querierV2 interfaces.Querier + + // used for attribute metadata 
enrichment for logs and traces + logsKeys map[string]v3.AttributeKey + spansKeys map[string]v3.AttributeKey } func NewThresholdRule( @@ -164,16 +168,8 @@ func (r *ThresholdRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.evalWindow.Milliseconds()), zap.Int64("evalDelay", r.evalDelay.Milliseconds())) - start := ts.Add(-time.Duration(r.evalWindow)).UnixMilli() - end := ts.UnixMilli() - - if r.evalDelay > 0 { - start = start - int64(r.evalDelay.Milliseconds()) - end = end - int64(r.evalDelay.Milliseconds()) - } - // round to minute otherwise we could potentially miss data - start = start - (start % (60 * 1000)) - end = end - (end % (60 * 1000)) + startTs, endTs := r.Timestamps(ts) + start, end := startTs.UnixMilli(), endTs.UnixMilli() if r.ruleCondition.QueryType() == v3.QueryTypeClickHouseSQL { params := &v3.QueryRangeParamsV3{ @@ -239,245 +235,76 @@ func (r *ThresholdRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, }, nil } -// The following function is used to prepare the where clause for the query -// `lbls` contains the key value pairs of the labels from the result of the query -// We iterate over the where clause and replace the labels with the actual values -// There are two cases: -// 1. The label is present in the where clause -// 2. The label is not present in the where clause -// -// Example for case 2: -// Latency by serviceName without any filter -// In this case, for each service with latency > threshold we send a notification -// The expectation will be that clicking on the related traces for service A, will -// take us to the traces page with the filter serviceName=A -// So for all the missing labels in the where clause, we add them as key = value -// -// Example for case 1: -// Severity text IN (WARN, ERROR) -// In this case, the Severity text will appear in the `lbls` if it were part of the group -// by clause, in which case we replace it with the actual value for the notification -// i.e Severity text = WARN -// If the Severity text is not part of the group by clause, then we add it as it is -func (r *ThresholdRule) fetchFilters(selectedQuery string, lbls labels.Labels) []v3.FilterItem { - var filterItems []v3.FilterItem - - added := make(map[string]struct{}) - - if r.ruleCondition.CompositeQuery.QueryType == v3.QueryTypeBuilder && - r.ruleCondition.CompositeQuery.BuilderQueries[selectedQuery] != nil && - r.ruleCondition.CompositeQuery.BuilderQueries[selectedQuery].Filters != nil { - - for _, item := range r.ruleCondition.CompositeQuery.BuilderQueries[selectedQuery].Filters.Items { - exists := false - for _, label := range lbls { - if item.Key.Key == label.Name { - // if the label is present in the where clause, replace it with key = value - filterItems = append(filterItems, v3.FilterItem{ - Key: item.Key, - Operator: v3.FilterOperatorEqual, - Value: label.Value, - }) - exists = true - added[label.Name] = struct{}{} - break - } - } - - if !exists { - // if the label is not present in the where clause, add it as it is - filterItems = append(filterItems, item) - } - } - } - - // add the labels which are not present in the where clause - for _, label := range lbls { - if _, ok := added[label.Name]; !ok { - filterItems = append(filterItems, v3.FilterItem{ - Key: v3.AttributeKey{Key: label.Name}, - Operator: v3.FilterOperatorEqual, - Value: label.Value, - }) - } - } - - return filterItems -} - func (r *ThresholdRule) prepareLinksToLogs(ts time.Time, lbls 
labels.Labels) string { selectedQuery := r.GetSelectedQuery() + qr, err := r.prepareQueryRange(ts) + if err != nil { + return "" + } + start := time.UnixMilli(qr.Start) + end := time.UnixMilli(qr.End) + // TODO(srikanthccv): handle formula queries if selectedQuery < "A" || selectedQuery > "Z" { return "" } - q, err := r.prepareQueryRange(ts) - if err != nil { + q := r.ruleCondition.CompositeQuery.BuilderQueries[selectedQuery] + if q == nil { return "" } - // Logs list view expects time in milliseconds - tr := v3.URLShareableTimeRange{ - Start: q.Start, - End: q.End, - PageSize: 100, - } - - options := v3.URLShareableOptions{ - MaxLines: 2, - Format: "list", - SelectColumns: []v3.AttributeKey{}, - } - - period, _ := json.Marshal(tr) - urlEncodedTimeRange := url.QueryEscape(string(period)) - - filterItems := r.fetchFilters(selectedQuery, lbls) - urlData := v3.URLShareableCompositeQuery{ - QueryType: string(v3.QueryTypeBuilder), - Builder: v3.URLShareableBuilderQuery{ - QueryData: []v3.BuilderQuery{ - { - DataSource: v3.DataSourceLogs, - QueryName: "A", - AggregateOperator: v3.AggregateOperatorNoOp, - AggregateAttribute: v3.AttributeKey{}, - Filters: &v3.FilterSet{ - Items: filterItems, - Operator: "AND", - }, - Expression: "A", - Disabled: false, - Having: []v3.Having{}, - StepInterval: 60, - OrderBy: []v3.OrderBy{ - { - ColumnName: "timestamp", - Order: "desc", - }, - }, - }, - }, - QueryFormulas: make([]string, 0), - }, + + if q.DataSource != v3.DataSourceLogs { + return "" } - data, _ := json.Marshal(urlData) - compositeQuery := url.QueryEscape(url.QueryEscape(string(data))) + queryFilter := []v3.FilterItem{} + if q.Filters != nil { + queryFilter = q.Filters.Items + } - optionsData, _ := json.Marshal(options) - urlEncodedOptions := url.QueryEscape(string(optionsData)) + filterItems := contextlinks.PrepareFilters(lbls.Map(), queryFilter, q.GroupBy, r.logsKeys) - return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions) + return contextlinks.PrepareLinksToLogs(start, end, filterItems) } func (r *ThresholdRule) prepareLinksToTraces(ts time.Time, lbls labels.Labels) string { selectedQuery := r.GetSelectedQuery() + qr, err := r.prepareQueryRange(ts) + if err != nil { + return "" + } + start := time.UnixMilli(qr.Start) + end := time.UnixMilli(qr.End) + // TODO(srikanthccv): handle formula queries if selectedQuery < "A" || selectedQuery > "Z" { return "" } - q, err := r.prepareQueryRange(ts) - if err != nil { + q := r.ruleCondition.CompositeQuery.BuilderQueries[selectedQuery] + if q == nil { return "" } - // Traces list view expects time in nanoseconds - tr := v3.URLShareableTimeRange{ - Start: q.Start * time.Second.Microseconds(), - End: q.End * time.Second.Microseconds(), - PageSize: 100, - } - - options := v3.URLShareableOptions{ - MaxLines: 2, - Format: "list", - SelectColumns: constants.TracesListViewDefaultSelectedColumns, - } - - period, _ := json.Marshal(tr) - urlEncodedTimeRange := url.QueryEscape(string(period)) - - filterItems := r.fetchFilters(selectedQuery, lbls) - urlData := v3.URLShareableCompositeQuery{ - QueryType: string(v3.QueryTypeBuilder), - Builder: v3.URLShareableBuilderQuery{ - QueryData: []v3.BuilderQuery{ - { - DataSource: v3.DataSourceTraces, - QueryName: "A", - AggregateOperator: v3.AggregateOperatorNoOp, - AggregateAttribute: v3.AttributeKey{}, - Filters: &v3.FilterSet{ - Items: filterItems, - Operator: "AND", - }, - Expression: "A", - Disabled: false, - Having: 
[]v3.Having{}, - StepInterval: 60, - OrderBy: []v3.OrderBy{ - { - ColumnName: "timestamp", - Order: "desc", - }, - }, - }, - }, - QueryFormulas: make([]string, 0), - }, + + if q.DataSource != v3.DataSourceTraces { + return "" } - data, _ := json.Marshal(urlData) - compositeQuery := url.QueryEscape(url.QueryEscape(string(data))) + queryFilter := []v3.FilterItem{} + if q.Filters != nil { + queryFilter = q.Filters.Items + } - optionsData, _ := json.Marshal(options) - urlEncodedOptions := url.QueryEscape(string(optionsData)) + filterItems := contextlinks.PrepareFilters(lbls.Map(), queryFilter, q.GroupBy, r.spansKeys) - return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions) + return contextlinks.PrepareLinksToTraces(start, end, filterItems) } func (r *ThresholdRule) GetSelectedQuery() string { - if r.ruleCondition != nil { - if r.ruleCondition.SelectedQuery != "" { - return r.ruleCondition.SelectedQuery - } - - queryNames := map[string]struct{}{} - - if r.ruleCondition.CompositeQuery != nil { - if r.ruleCondition.QueryType() == v3.QueryTypeBuilder { - for name := range r.ruleCondition.CompositeQuery.BuilderQueries { - queryNames[name] = struct{}{} - } - } else if r.ruleCondition.QueryType() == v3.QueryTypeClickHouseSQL { - for name := range r.ruleCondition.CompositeQuery.ClickHouseQueries { - queryNames[name] = struct{}{} - } - } - } - - // The following logic exists for backward compatibility - // If there is no selected query, then - // - check if F1 is present, if yes, return F1 - // - else return the query with max ascii value - // this logic is not really correct. we should be considering - // whether the query is enabled or not. but this is a temporary - // fix to support backward compatibility - if _, ok := queryNames["F1"]; ok { - return "F1" - } - keys := make([]string, 0, len(queryNames)) - for k := range queryNames { - keys = append(keys, k) - } - sort.Strings(keys) - return keys[len(keys)-1] - } - // This should never happen - return "" + return r.ruleCondition.GetSelectedQueryName() } func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (Vector, error) { @@ -492,11 +319,37 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (Vec } if params.CompositeQuery.QueryType == v3.QueryTypeBuilder { - // check if any enrichment is required for logs if yes then enrich them - if logsv3.EnrichmentRequired(params) { - // Note: Sending empty fields key because enrichment is only needed for json - // TODO: Add support for attribute enrichment later - logsv3.Enrich(params, map[string]v3.AttributeKey{}) + hasLogsQuery := false + hasTracesQuery := false + for _, query := range params.CompositeQuery.BuilderQueries { + if query.DataSource == v3.DataSourceLogs { + hasLogsQuery = true + } + if query.DataSource == v3.DataSourceTraces { + hasTracesQuery = true + } + } + + if hasLogsQuery { + // check if any enrichment is required for logs if yes then enrich them + if logsv3.EnrichmentRequired(params) { + logsFields, err := r.reader.GetLogFields(ctx) + if err != nil { + return nil, err + } + logsKeys := model.GetLogFieldsV3(ctx, params, logsFields) + r.logsKeys = logsKeys + logsv3.Enrich(params, logsKeys) + } + } + + if hasTracesQuery { + spanKeys, err := r.reader.GetSpanAttributeKeys(ctx) + if err != nil { + return nil, err + } + r.spansKeys = spanKeys + tracesV3.Enrich(params, spanKeys) } } @@ -504,9 +357,9 @@ func (r *ThresholdRule) 
buildAndRunQuery(ctx context.Context, ts time.Time) (Vec var queryErrors map[string]error if r.version == "v4" { - results, queryErrors, err = r.querierV2.QueryRange(ctx, params, map[string]v3.AttributeKey{}) + results, queryErrors, err = r.querierV2.QueryRange(ctx, params) } else { - results, queryErrors, err = r.querier.QueryRange(ctx, params, map[string]v3.AttributeKey{}) + results, queryErrors, err = r.querier.QueryRange(ctx, params) } if err != nil { @@ -654,11 +507,13 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er if r.typ == AlertTypeTraces { link := r.prepareLinksToTraces(ts, smpl.MetricOrig) if link != "" && r.hostFromSource() != "" { + zap.L().Info("adding traces link to annotations", zap.String("link", fmt.Sprintf("%s/traces-explorer?%s", r.hostFromSource(), link))) annotations = append(annotations, labels.Label{Name: "related_traces", Value: fmt.Sprintf("%s/traces-explorer?%s", r.hostFromSource(), link)}) } } else if r.typ == AlertTypeLogs { link := r.prepareLinksToLogs(ts, smpl.MetricOrig) if link != "" && r.hostFromSource() != "" { + zap.L().Info("adding logs link to annotations", zap.String("link", fmt.Sprintf("%s/logs/logs-explorer?%s", r.hostFromSource(), link))) annotations = append(annotations, labels.Label{Name: "related_logs", Value: fmt.Sprintf("%s/logs/logs-explorer?%s", r.hostFromSource(), link)}) } } @@ -703,7 +558,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er r.active[h] = a } - itemsToAdd := []v3.RuleStateHistory{} + itemsToAdd := []model.RuleStateHistory{} // Check if any pending alerts should be removed or fire now. Write out alert timeseries. for fp, a := range r.active { @@ -720,13 +575,13 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er if a.State != model.StateInactive { a.State = model.StateInactive a.ResolvedAt = ts - itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{ + itemsToAdd = append(itemsToAdd, model.RuleStateHistory{ RuleID: r.ID(), RuleName: r.Name(), State: model.StateInactive, StateChanged: true, UnixMilli: ts.UnixMilli(), - Labels: v3.LabelsString(labelsJSON), + Labels: model.LabelsString(labelsJSON), Fingerprint: a.QueryResultLables.Hash(), Value: a.Value, }) @@ -741,13 +596,13 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er if a.Missing { state = model.StateNoData } - itemsToAdd = append(itemsToAdd, v3.RuleStateHistory{ + itemsToAdd = append(itemsToAdd, model.RuleStateHistory{ RuleID: r.ID(), RuleName: r.Name(), State: state, StateChanged: true, UnixMilli: ts.UnixMilli(), - Labels: v3.LabelsString(labelsJSON), + Labels: model.LabelsString(labelsJSON), Fingerprint: a.QueryResultLables.Hash(), Value: a.Value, }) diff --git a/pkg/query-service/rules/threshold_rule_test.go b/pkg/query-service/rules/threshold_rule_test.go index ab37ad6af1..65d020d25f 100644 --- a/pkg/query-service/rules/threshold_rule_test.go +++ b/pkg/query-service/rules/threshold_rule_test.go @@ -18,7 +18,7 @@ import ( func TestThresholdRuleShouldAlert(t *testing.T) { postableRule := PostableRule{ AlertName: "Tricky Condition Tests", - AlertType: "METRIC_BASED_ALERT", + AlertType: AlertTypeMetric, RuleType: RuleTypeThreshold, EvalWindow: Duration(5 * time.Minute), Frequency: Duration(1 * time.Minute), @@ -788,7 +788,7 @@ func TestPrepareLinksToLogs(t *testing.T) { func TestPrepareLinksToTraces(t *testing.T) { postableRule := PostableRule{ AlertName: "Links to traces test", - AlertType: "TRACES_BASED_ALERT", + AlertType: 
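Reviewer note: the `Eval` hunk above attaches `related_traces` / `related_logs` annotations only when both the source host and the prepared link fragment are non-empty, so alerts never carry half-formed URLs. Distilled into a sketch (the host value is hypothetical):

```go
package main

import "fmt"

// buildExplorerLink mirrors the guard in Eval: emit an annotation link
// only when both the frontend host and the query fragment are known
func buildExplorerLink(host, explorerPath, fragment string) string {
	if host == "" || fragment == "" {
		return ""
	}
	return fmt.Sprintf("%s/%s?%s", host, explorerPath, fragment)
}

func main() {
	fmt.Println(buildExplorerLink("https://signoz.example.com", "traces-explorer", "compositeQuery=..."))
	// with no host from the source, no annotation is added at all
	fmt.Println(buildExplorerLink("", "logs/logs-explorer", "compositeQuery=...") == "")
}
```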
AlertTypeTraces, RuleType: RuleTypeThreshold, EvalWindow: Duration(5 * time.Minute), Frequency: Duration(1 * time.Minute), @@ -830,7 +830,7 @@ func TestPrepareLinksToTraces(t *testing.T) { func TestThresholdRuleLabelNormalization(t *testing.T) { postableRule := PostableRule{ AlertName: "Tricky Condition Tests", - AlertType: "METRIC_BASED_ALERT", + AlertType: AlertTypeMetric, RuleType: RuleTypeThreshold, EvalWindow: Duration(5 * time.Minute), Frequency: Duration(1 * time.Minute), @@ -914,7 +914,7 @@ func TestThresholdRuleLabelNormalization(t *testing.T) { func TestThresholdRuleEvalDelay(t *testing.T) { postableRule := PostableRule{ AlertName: "Test Eval Delay", - AlertType: "METRIC_BASED_ALERT", + AlertType: AlertTypeMetric, RuleType: RuleTypeThreshold, EvalWindow: Duration(5 * time.Minute), Frequency: Duration(1 * time.Minute), @@ -963,7 +963,7 @@ func TestThresholdRuleEvalDelay(t *testing.T) { func TestThresholdRuleClickHouseTmpl(t *testing.T) { postableRule := PostableRule{ AlertName: "Tricky Condition Tests", - AlertType: "METRIC_BASED_ALERT", + AlertType: AlertTypeMetric, RuleType: RuleTypeThreshold, EvalWindow: Duration(5 * time.Minute), Frequency: Duration(1 * time.Minute), @@ -1019,7 +1019,7 @@ func (m *queryMatcherAny) Match(string, string) error { func TestThresholdRuleUnitCombinations(t *testing.T) { postableRule := PostableRule{ AlertName: "Units test", - AlertType: "METRIC_BASED_ALERT", + AlertType: AlertTypeMetric, RuleType: RuleTypeThreshold, EvalWindow: Duration(5 * time.Minute), Frequency: Duration(1 * time.Minute), @@ -1170,8 +1170,8 @@ func TestThresholdRuleUnitCombinations(t *testing.T) { func TestThresholdRuleNoData(t *testing.T) { postableRule := PostableRule{ - AlertName: "Units test", - AlertType: "METRIC_BASED_ALERT", + AlertName: "No data test", + AlertType: AlertTypeMetric, RuleType: RuleTypeThreshold, EvalWindow: Duration(5 * time.Minute), Frequency: Duration(1 * time.Minute), @@ -1261,3 +1261,238 @@ func TestThresholdRuleNoData(t *testing.T) { } } } + +func TestThresholdRuleTracesLink(t *testing.T) { + postableRule := PostableRule{ + AlertName: "Traces link test", + AlertType: AlertTypeTraces, + RuleType: RuleTypeThreshold, + EvalWindow: Duration(5 * time.Minute), + Frequency: Duration(1 * time.Minute), + RuleCondition: &RuleCondition{ + CompositeQuery: &v3.CompositeQuery{ + QueryType: v3.QueryTypeBuilder, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + StepInterval: 60, + AggregateAttribute: v3.AttributeKey{ + Key: "durationNano", + }, + AggregateOperator: v3.AggregateOperatorP95, + DataSource: v3.DataSourceTraces, + Expression: "A", + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{Key: "httpMethod", IsColumn: true, Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeString}, + Value: "GET", + Operator: v3.FilterOperatorEqual, + }, + }, + }, + }, + }, + }, + }, + } + fm := featureManager.StartManager() + mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{}) + if err != nil { + t.Errorf("an error '%s' was not expected when opening a stub database connection", err) + } + + metaCols := make([]cmock.ColumnType, 0) + metaCols = append(metaCols, cmock.ColumnType{Name: "DISTINCT(tagKey)", Type: "String"}) + metaCols = append(metaCols, cmock.ColumnType{Name: "tagType", Type: "String"}) + metaCols = append(metaCols, cmock.ColumnType{Name: "dataType", Type: "String"}) + metaCols = append(metaCols, cmock.ColumnType{Name: "isColumn", Type: "Bool"}) + + cols := 
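Reviewer note: the two new tests repeat the same mock choreography, so here is its skeleton in one place. This is a sketch, not the full test: it reuses the `cmock` package and the `queryMatcherAny` matcher already present in this file, and the column shapes follow the ones defined around this hunk.

```go
func TestEvalSkeleton(t *testing.T) {
	// an any-matcher, because these tests exercise eval logic, not SQL text
	mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{})
	if err != nil {
		t.Fatalf("opening stub connection: %v", err)
	}

	metaCols := []cmock.ColumnType{
		{Name: "DISTINCT(tagKey)", Type: "String"},
		{Name: "tagType", Type: "String"},
		{Name: "dataType", Type: "String"},
		{Name: "isColumn", Type: "Bool"},
	}
	cols := []cmock.ColumnType{
		{Name: "value", Type: "Float64"},
		{Name: "attr", Type: "String"},
		{Name: "timestamp", Type: "String"},
	}

	// expectations are consumed in order: metadata lookups first (used
	// by enrichment), then the actual range query
	mock.ExpectQuery("SELECT DISTINCT(tagKey), tagType, dataType, isColumn FROM archiveNamespace.span_attributes_keys").
		WillReturnRows(cmock.NewRows(metaCols, [][]interface{}{}))
	mock.ExpectQuery("SELECT any").
		WillReturnRows(cmock.NewRows(cols, [][]interface{}{}))

	// ... then build the rule and call rule.Eval as the full tests here do
}
```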
make([]cmock.ColumnType, 0) + cols = append(cols, cmock.ColumnType{Name: "value", Type: "Float64"}) + cols = append(cols, cmock.ColumnType{Name: "attr", Type: "String"}) + cols = append(cols, cmock.ColumnType{Name: "timestamp", Type: "String"}) + + for idx, c := range testCases { + metaRows := cmock.NewRows(metaCols, c.metaValues) + mock. + ExpectQuery("SELECT DISTINCT(tagKey), tagType, dataType, isColumn FROM archiveNamespace.span_attributes_keys"). + WillReturnRows(metaRows) + + rows := cmock.NewRows(cols, c.values) + + // We are testing the eval logic after the query is run + // so we don't care about the query string here + queryString := "SELECT any" + mock. + ExpectQuery(queryString). + WillReturnRows(rows) + postableRule.RuleCondition.CompareOp = CompareOp(c.compareOp) + postableRule.RuleCondition.MatchType = MatchType(c.matchType) + postableRule.RuleCondition.Target = &c.target + postableRule.RuleCondition.CompositeQuery.Unit = c.yAxisUnit + postableRule.RuleCondition.TargetUnit = c.targetUnit + postableRule.Annotations = map[string]string{ + "description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})", + "summary": "The rule threshold is set to {{$threshold}}, and the observed metric value is {{$value}}", + } + + options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace") + reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true) + + rule, err := NewThresholdRule("69", &postableRule, fm, reader, true) + rule.temporalityMap = map[string]map[v3.Temporality]bool{ + "signoz_calls_total": { + v3.Delta: true, + }, + } + if err != nil { + assert.NoError(t, err) + } + + retVal, err := rule.Eval(context.Background(), time.Now()) + if err != nil { + assert.NoError(t, err) + } + + if c.expectAlerts == 0 { + assert.Equal(t, 0, retVal.(int), "case %d", idx) + } else { + assert.Equal(t, c.expectAlerts, retVal.(int), "case %d", idx) + for _, item := range rule.active { + for name, value := range item.Annotations.Map() { + if name == "related_traces" { + assert.NotEmpty(t, value, "case %d", idx) + assert.Contains(t, value, "GET") + } + } + } + } + } +} + +func TestThresholdRuleLogsLink(t *testing.T) { + postableRule := PostableRule{ + AlertName: "Logs link test", + AlertType: AlertTypeLogs, + RuleType: RuleTypeThreshold, + EvalWindow: Duration(5 * time.Minute), + Frequency: Duration(1 * time.Minute), + RuleCondition: &RuleCondition{ + CompositeQuery: &v3.CompositeQuery{ + QueryType: v3.QueryTypeBuilder, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + StepInterval: 60, + AggregateAttribute: v3.AttributeKey{ + Key: "component", + }, + AggregateOperator: v3.AggregateOperatorCountDistinct, + DataSource: v3.DataSourceLogs, + Expression: "A", + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{Key: "k8s.container.name", IsColumn: false, Type: v3.AttributeKeyTypeTag, DataType: v3.AttributeKeyDataTypeString}, + Value: "testcontainer", + Operator: v3.FilterOperatorEqual, + }, + }, + }, + }, + }, + }, + }, + } + fm := featureManager.StartManager() + mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{}) + if err != nil { + t.Errorf("an error '%s' was not expected when opening a stub database connection", err) + } + + attrMetaCols := make([]cmock.ColumnType, 0) + attrMetaCols = append(attrMetaCols, cmock.ColumnType{Name: "name", Type: "String"}) + attrMetaCols = append(attrMetaCols, 
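Reviewer note: the assertion that the `related_traces` link contains `GET` works because link filters are now derived from the query's own filter items plus the fired sample's labels. A simplified sketch of what a `PrepareFilters`-style helper plausibly does (the real one also types keys using the `logsKeys` / `spansKeys` metadata, which this ignores):

```go
package main

import "fmt"

type filterItem struct {
	Key      string
	Operator string
	Value    interface{}
}

// prepareFilters keeps the query's existing filters and pins each
// group-by label from the alerting sample as an equality filter, so the
// explorer link reproduces the exact series that fired
func prepareFilters(sampleLabels map[string]string, existing []filterItem, groupBy []string) []filterItem {
	out := append([]filterItem{}, existing...)
	for _, key := range groupBy {
		if v, ok := sampleLabels[key]; ok {
			out = append(out, filterItem{Key: key, Operator: "=", Value: v})
		}
	}
	return out
}

func main() {
	items := prepareFilters(
		map[string]string{"service.name": "frontend"},
		[]filterItem{{Key: "httpMethod", Operator: "=", Value: "GET"}},
		[]string{"service.name"},
	)
	fmt.Println(items) // [{httpMethod = GET} {service.name = frontend}]
}
```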
cmock.ColumnType{Name: "dataType", Type: "String"}) + + resourceMetaCols := make([]cmock.ColumnType, 0) + resourceMetaCols = append(resourceMetaCols, cmock.ColumnType{Name: "name", Type: "String"}) + resourceMetaCols = append(resourceMetaCols, cmock.ColumnType{Name: "dataType", Type: "String"}) + + createTableCols := make([]cmock.ColumnType, 0) + createTableCols = append(createTableCols, cmock.ColumnType{Name: "statement", Type: "String"}) + + cols := make([]cmock.ColumnType, 0) + cols = append(cols, cmock.ColumnType{Name: "value", Type: "Float64"}) + cols = append(cols, cmock.ColumnType{Name: "attr", Type: "String"}) + cols = append(cols, cmock.ColumnType{Name: "timestamp", Type: "String"}) + + for idx, c := range testCases { + attrMetaRows := cmock.NewRows(attrMetaCols, c.attrMetaValues) + mock. + ExpectSelect("SELECT DISTINCT name, datatype from signoz_logs.distributed_logs_attribute_keys group by name, datatype"). + WillReturnRows(attrMetaRows) + + resourceMetaRows := cmock.NewRows(resourceMetaCols, c.resourceMetaValues) + mock. + ExpectSelect("SELECT DISTINCT name, datatype from signoz_logs.distributed_logs_resource_keys group by name, datatype"). + WillReturnRows(resourceMetaRows) + + createTableRows := cmock.NewRows(createTableCols, c.createTableValues) + mock. + ExpectSelect("SHOW CREATE TABLE signoz_logs.logs"). + WillReturnRows(createTableRows) + + rows := cmock.NewRows(cols, c.values) + + // We are testing the eval logic after the query is run + // so we don't care about the query string here + queryString := "SELECT any" + mock. + ExpectQuery(queryString). + WillReturnRows(rows) + postableRule.RuleCondition.CompareOp = CompareOp(c.compareOp) + postableRule.RuleCondition.MatchType = MatchType(c.matchType) + postableRule.RuleCondition.Target = &c.target + postableRule.RuleCondition.CompositeQuery.Unit = c.yAxisUnit + postableRule.RuleCondition.TargetUnit = c.targetUnit + postableRule.Annotations = map[string]string{ + "description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})", + "summary": "The rule threshold is set to {{$threshold}}, and the observed metric value is {{$value}}", + } + + options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace") + reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true) + + rule, err := NewThresholdRule("69", &postableRule, fm, reader, true) + rule.temporalityMap = map[string]map[v3.Temporality]bool{ + "signoz_calls_total": { + v3.Delta: true, + }, + } + if err != nil { + assert.NoError(t, err) + } + + retVal, err := rule.Eval(context.Background(), time.Now()) + if err != nil { + assert.NoError(t, err) + } + + if c.expectAlerts == 0 { + assert.Equal(t, 0, retVal.(int), "case %d", idx) + } else { + assert.Equal(t, c.expectAlerts, retVal.(int), "case %d", idx) + for _, item := range rule.active { + for name, value := range item.Annotations.Map() { + if name == "related_logs" { + assert.NotEmpty(t, value, "case %d", idx) + assert.Contains(t, value, "testcontainer") + } + } + } + } + } +} diff --git a/pkg/query-service/rules/threshold_rule_test_data.go b/pkg/query-service/rules/threshold_rule_test_data.go new file mode 100644 index 0000000000..cc301c5aa9 --- /dev/null +++ b/pkg/query-service/rules/threshold_rule_test_data.go @@ -0,0 +1,90 @@ +package rules + +import "time" + +var ( + testCases = []struct { + targetUnit string + yAxisUnit string + values [][]interface{} + metaValues [][]interface{} + attrMetaValues 
[][]interface{} + resourceMetaValues [][]interface{} + createTableValues [][]interface{} + expectAlerts int + compareOp string + matchType string + target float64 + summaryAny []string + }{ + { + targetUnit: "s", + yAxisUnit: "ns", + values: [][]interface{}{ + {float64(572588400), "attr", time.Now()}, // 0.57 seconds + {float64(572386400), "attr", time.Now().Add(1 * time.Second)}, // 0.57 seconds + {float64(300947400), "attr", time.Now().Add(2 * time.Second)}, // 0.3 seconds + {float64(299316000), "attr", time.Now().Add(3 * time.Second)}, // 0.3 seconds + {float64(66640400.00000001), "attr", time.Now().Add(4 * time.Second)}, // 0.06 seconds + }, + metaValues: [][]interface{}{}, + createTableValues: [][]interface{}{ + {"statement"}, + }, + attrMetaValues: [][]interface{}{}, + resourceMetaValues: [][]interface{}{}, + expectAlerts: 0, + compareOp: "1", // Above + matchType: "1", // Once + target: 1, // 1 second + }, + { + targetUnit: "ms", + yAxisUnit: "ns", + values: [][]interface{}{ + {float64(572588400), "attr", time.Now()}, // 572.58 ms + {float64(572386400), "attr", time.Now().Add(1 * time.Second)}, // 572.38 ms + {float64(300947400), "attr", time.Now().Add(2 * time.Second)}, // 300.94 ms + {float64(299316000), "attr", time.Now().Add(3 * time.Second)}, // 299.31 ms + {float64(66640400.00000001), "attr", time.Now().Add(4 * time.Second)}, // 66.64 ms + }, + metaValues: [][]interface{}{}, + createTableValues: [][]interface{}{ + {"statement"}, + }, + attrMetaValues: [][]interface{}{}, + resourceMetaValues: [][]interface{}{}, + expectAlerts: 4, + compareOp: "1", // Above + matchType: "1", // Once + target: 200, // 200 ms + summaryAny: []string{ + "observed metric value is 299 ms", + "the observed metric value is 573 ms", + "the observed metric value is 572 ms", + "the observed metric value is 301 ms", + }, + }, + { + targetUnit: "decgbytes", + yAxisUnit: "bytes", + values: [][]interface{}{ + {float64(2863284053), "attr", time.Now()}, // 2.86 GB + {float64(2863388842), "attr", time.Now().Add(1 * time.Second)}, // 2.86 GB + {float64(300947400), "attr", time.Now().Add(2 * time.Second)}, // 0.3 GB + {float64(299316000), "attr", time.Now().Add(3 * time.Second)}, // 0.3 GB + {float64(66640400.00000001), "attr", time.Now().Add(4 * time.Second)}, // 66.64 MB + }, + metaValues: [][]interface{}{}, + createTableValues: [][]interface{}{ + {"statement"}, + }, + attrMetaValues: [][]interface{}{}, + resourceMetaValues: [][]interface{}{}, + expectAlerts: 0, + compareOp: "1", // Above + matchType: "1", // Once + target: 200, // 200 GB + }, + } +) diff --git a/pkg/query-service/telemetry/telemetry.go b/pkg/query-service/telemetry/telemetry.go index 7f282ea3f9..be6ad4719c 100644 --- a/pkg/query-service/telemetry/telemetry.go +++ b/pkg/query-service/telemetry/telemetry.go @@ -178,9 +178,12 @@ type Telemetry struct { patTokenUser bool mutex sync.RWMutex - alertsInfoCallback func(ctx context.Context) (*model.AlertsInfo, error) - userCountCallback func(ctx context.Context) (int, error) - userRoleCallback func(ctx context.Context, groupId string) (string, error) + alertsInfoCallback func(ctx context.Context) (*model.AlertsInfo, error) + userCountCallback func(ctx context.Context) (int, error) + userRoleCallback func(ctx context.Context, groupId string) (string, error) + getUsersCallback func(ctx context.Context) ([]model.UserPayload, *model.ApiError) + dashboardsInfoCallback func(ctx context.Context) (*model.DashboardsInfo, error) + savedViewsInfoCallback func(ctx context.Context) (*model.SavedViewsInfo, error) } 
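Reviewer note on `threshold_rule_test_data.go` above: the fixture leans on unit normalization, and the second case is worth sanity-checking by hand. Values arrive in `ns`, the target unit is `ms`, and four of the five samples exceed 200 ms. A quick check of that arithmetic (this only reproduces the conversion and count, not the full CompareOp/MatchType machinery):

```go
package main

import "fmt"

func main() {
	valuesNs := []float64{572588400, 572386400, 300947400, 299316000, 66640400.00000001}
	targetMs := 200.0

	alerts := 0
	for _, v := range valuesNs {
		ms := v / 1e6 // ns -> ms
		if ms > targetMs {
			alerts++
		}
	}
	fmt.Println(alerts) // 4, matching expectAlerts in the second case
}
```

The first case is the same data against a 1 s target (max sample is about 0.57 s), and the bytes case tops out near 2.86 GB against a 200 GB target, hence `expectAlerts: 0` for both.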
func (a *Telemetry) SetAlertsInfoCallback(callback func(ctx context.Context) (*model.AlertsInfo, error)) { @@ -195,6 +198,18 @@ func (a *Telemetry) SetUserRoleCallback(callback func(ctx context.Context, group a.userRoleCallback = callback } +func (a *Telemetry) SetGetUsersCallback(callback func(ctx context.Context) ([]model.UserPayload, *model.ApiError)) { + a.getUsersCallback = callback +} + +func (a *Telemetry) SetSavedViewsInfoCallback(callback func(ctx context.Context) (*model.SavedViewsInfo, error)) { + a.savedViewsInfoCallback = callback +} + +func (a *Telemetry) SetDashboardsInfoCallback(callback func(ctx context.Context) (*model.DashboardsInfo, error)) { + a.dashboardsInfoCallback = callback +} + func createTelemetry() { // Do not do anything in CI (not even resolving the outbound IP address) if testing.Testing() { @@ -296,7 +311,7 @@ func createTelemetry() { data[key] = value } - users, apiErr := telemetry.reader.GetUsers(ctx) + users, apiErr := telemetry.getUsersCallback(ctx) if apiErr == nil { for _, user := range users { if user.Email == DEFAULT_CLOUD_EMAIL { @@ -308,65 +323,48 @@ func createTelemetry() { alertsInfo, err := telemetry.alertsInfoCallback(ctx) if err == nil { - dashboardsInfo, err := telemetry.reader.GetDashboardsInfo(ctx) + dashboardsInfo, err := telemetry.dashboardsInfoCallback(ctx) if err == nil { - channels, err := telemetry.reader.GetChannels() + savedViewsInfo, err := telemetry.savedViewsInfoCallback(ctx) if err == nil { - for _, channel := range *channels { - switch channel.Type { - case "slack": - alertsInfo.SlackChannels++ - case "webhook": - alertsInfo.WebHookChannels++ - case "pagerduty": - alertsInfo.PagerDutyChannels++ - case "opsgenie": - alertsInfo.OpsGenieChannels++ - case "email": - alertsInfo.EmailChannels++ - case "msteams": - alertsInfo.MSTeamsChannels++ - } + dashboardsAlertsData := map[string]interface{}{ + "totalDashboards": dashboardsInfo.TotalDashboards, + "totalDashboardsWithPanelAndName": dashboardsInfo.TotalDashboardsWithPanelAndName, + "dashboardNames": dashboardsInfo.DashboardNames, + "alertNames": alertsInfo.AlertNames, + "logsBasedPanels": dashboardsInfo.LogsBasedPanels, + "metricBasedPanels": dashboardsInfo.MetricBasedPanels, + "tracesBasedPanels": dashboardsInfo.TracesBasedPanels, + "dashboardsWithTSV2": dashboardsInfo.QueriesWithTSV2, + "dashboardWithLogsChQuery": dashboardsInfo.DashboardsWithLogsChQuery, + "totalAlerts": alertsInfo.TotalAlerts, + "alertsWithTSV2": alertsInfo.AlertsWithTSV2, + "logsBasedAlerts": alertsInfo.LogsBasedAlerts, + "metricBasedAlerts": alertsInfo.MetricBasedAlerts, + "tracesBasedAlerts": alertsInfo.TracesBasedAlerts, + "totalChannels": alertsInfo.TotalChannels, + "totalSavedViews": savedViewsInfo.TotalSavedViews, + "logsSavedViews": savedViewsInfo.LogsSavedViews, + "tracesSavedViews": savedViewsInfo.TracesSavedViews, + "slackChannels": alertsInfo.SlackChannels, + "webHookChannels": alertsInfo.WebHookChannels, + "pagerDutyChannels": alertsInfo.PagerDutyChannels, + "opsGenieChannels": alertsInfo.OpsGenieChannels, + "emailChannels": alertsInfo.EmailChannels, + "msteamsChannels": alertsInfo.MSTeamsChannels, + "metricsBuilderQueries": alertsInfo.MetricsBuilderQueries, + "metricsClickHouseQueries": alertsInfo.MetricsClickHouseQueries, + "metricsPrometheusQueries": alertsInfo.MetricsPrometheusQueries, + "spanMetricsPrometheusQueries": alertsInfo.SpanMetricsPrometheusQueries, + "alertsWithLogsChQuery": alertsInfo.AlertsWithLogsChQuery, } - savedViewsInfo, err := telemetry.reader.GetSavedViewsInfo(ctx) - if err 
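Reviewer note: this telemetry hunk replaces direct `telemetry.reader.*` calls with injected callbacks (`SetGetUsersCallback`, `SetDashboardsInfoCallback`, `SetSavedViewsInfoCallback`), which removes the reader dependency from the package and makes the singleton stubbable in tests. The pattern, reduced to a runnable sketch:

```go
package main

import (
	"context"
	"fmt"
)

// telemetry holds closures instead of a concrete reader; callers wire
// them up at startup, and tests can wire up fakes
type telemetry struct {
	userCountCallback func(ctx context.Context) (int, error)
}

func (t *telemetry) SetUserCountCallback(cb func(ctx context.Context) (int, error)) {
	t.userCountCallback = cb
}

func main() {
	t := &telemetry{}
	t.SetUserCountCallback(func(ctx context.Context) (int, error) { return 42, nil })

	n, _ := t.userCountCallback(context.Background())
	fmt.Println(n) // 42
}
```

One caveat of the style: the callbacks must be set before the collection loop first runs, or the nil function value panics on use.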
== nil { - dashboardsAlertsData := map[string]interface{}{ - "totalDashboards": dashboardsInfo.TotalDashboards, - "totalDashboardsWithPanelAndName": dashboardsInfo.TotalDashboardsWithPanelAndName, - "dashboardNames": dashboardsInfo.DashboardNames, - "alertNames": alertsInfo.AlertNames, - "logsBasedPanels": dashboardsInfo.LogsBasedPanels, - "metricBasedPanels": dashboardsInfo.MetricBasedPanels, - "tracesBasedPanels": dashboardsInfo.TracesBasedPanels, - "dashboardsWithTSV2": dashboardsInfo.QueriesWithTSV2, - "totalAlerts": alertsInfo.TotalAlerts, - "alertsWithTSV2": alertsInfo.AlertsWithTSV2, - "logsBasedAlerts": alertsInfo.LogsBasedAlerts, - "metricBasedAlerts": alertsInfo.MetricBasedAlerts, - "tracesBasedAlerts": alertsInfo.TracesBasedAlerts, - "totalChannels": len(*channels), - "totalSavedViews": savedViewsInfo.TotalSavedViews, - "logsSavedViews": savedViewsInfo.LogsSavedViews, - "tracesSavedViews": savedViewsInfo.TracesSavedViews, - "slackChannels": alertsInfo.SlackChannels, - "webHookChannels": alertsInfo.WebHookChannels, - "pagerDutyChannels": alertsInfo.PagerDutyChannels, - "opsGenieChannels": alertsInfo.OpsGenieChannels, - "emailChannels": alertsInfo.EmailChannels, - "msteamsChannels": alertsInfo.MSTeamsChannels, - "metricsBuilderQueries": alertsInfo.MetricsBuilderQueries, - "metricsClickHouseQueries": alertsInfo.MetricsClickHouseQueries, - "metricsPrometheusQueries": alertsInfo.MetricsPrometheusQueries, - "spanMetricsPrometheusQueries": alertsInfo.SpanMetricsPrometheusQueries, - } - // send event only if there are dashboards or alerts or channels - if (dashboardsInfo.TotalDashboards > 0 || alertsInfo.TotalAlerts > 0 || len(*channels) > 0 || savedViewsInfo.TotalSavedViews > 0) && apiErr == nil { - for _, user := range users { - if user.Email == DEFAULT_CLOUD_EMAIL { - continue - } - telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, dashboardsAlertsData, user.Email, false, false) + // send event only if there are dashboards or alerts or channels + if (dashboardsInfo.TotalDashboards > 0 || alertsInfo.TotalAlerts > 0 || alertsInfo.TotalChannels > 0 || savedViewsInfo.TotalSavedViews > 0) && apiErr == nil { + for _, user := range users { + if user.Email == DEFAULT_CLOUD_EMAIL { + continue } + telemetry.SendEvent(TELEMETRY_EVENT_DASHBOARDS_ALERTS, dashboardsAlertsData, user.Email, false, false) } } } @@ -450,11 +448,9 @@ func getOutboundIP() string { } defer resp.Body.Close() + ipBody, err := io.ReadAll(resp.Body) if err == nil { - ipBody, err := io.ReadAll(resp.Body) - if err == nil { - ip = ipBody - } + ip = ipBody } return string(ip) diff --git a/pkg/query-service/tests/integration/filter_suggestions_test.go b/pkg/query-service/tests/integration/filter_suggestions_test.go index 6859a6ac2f..a1f56115c5 100644 --- a/pkg/query-service/tests/integration/filter_suggestions_test.go +++ b/pkg/query-service/tests/integration/filter_suggestions_test.go @@ -186,7 +186,7 @@ func (tb *FilterSuggestionsTestBed) mockAttribValuesQueryResponse( {Type: "Nullable(Float64)", Name: "float64TagValue"}, } - expectedAttribKeysInQuery := []string{} + expectedAttribKeysInQuery := []any{} mockResultRows := [][]any{} for idx, attrib := range expectedAttribs { expectedAttribKeysInQuery = append(expectedAttribKeysInQuery, attrib.Key) @@ -198,8 +198,8 @@ func (tb *FilterSuggestionsTestBed) mockAttribValuesQueryResponse( } tb.mockClickhouse.ExpectQuery( - "select.*tagKey.*stringTagValue.*int64TagValue.*float64TagValue.*distributed_tag_attributes.*tagKey.*in.*", - 
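Reviewer note: the `filter_suggestions_test` fix that completes just below changes `expectedAttribKeysInQuery` from `[]string` to `[]any` and spreads it into `WithArgs(...)` (the added `...` implies the mock's `WithArgs` is variadic). That combination is forced by Go's variadics: a `[]string` cannot be spread into a `...any` parameter, and passing a slice without `...` binds it as a single argument. A minimal demonstration:

```go
package main

import "fmt"

func variadic(args ...any) int { return len(args) }

func main() {
	keys := []any{"service.name", "http.method"}

	fmt.Println(variadic(keys))    // 1: the whole slice is one argument
	fmt.Println(variadic(keys...)) // 2: each key becomes its own bind parameter

	// strs := []string{"a", "b"}
	// variadic(strs...) // compile error: cannot use []string as []any
}
```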
).WithArgs(expectedAttribKeysInQuery).WillReturnRows(mockhouse.NewRows(resultCols, mockResultRows)) + "select.*tagKey.*stringTagValue.*int64TagValue.*float64TagValue.*distributed_tag_attributes.*tagKey", + ).WithArgs(expectedAttribKeysInQuery...).WillReturnRows(mockhouse.NewRows(resultCols, mockResultRows)) } type FilterSuggestionsTestBed struct { diff --git a/pkg/query-service/utils/format.go b/pkg/query-service/utils/format.go index e9b7a0b7e3..f09f4dffd6 100644 --- a/pkg/query-service/utils/format.go +++ b/pkg/query-service/utils/format.go @@ -14,6 +14,8 @@ import ( // ValidateAndCastValue validates and casts the value of a key to the corresponding data type of the key func ValidateAndCastValue(v interface{}, dataType v3.AttributeKeyDataType) (interface{}, error) { + // get the actual value if it's a pointer + v = getPointerValue(v) switch dataType { case v3.AttributeKeyDataTypeString: switch x := v.(type) { diff --git a/signoz-core-ui b/signoz-core-ui new file mode 160000 index 0000000000..f8c925d842 --- /dev/null +++ b/signoz-core-ui @@ -0,0 +1 @@ +Subproject commit f8c925d842922f8a30063012a7bfb688a3bf0f36
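Reviewer note on the `format.go` change above: `ValidateAndCastValue` now unwraps pointers before its data-type switch, so `*string` / `*float64` values coming out of scanned rows validate like their plain counterparts. `getPointerValue` itself is not part of this diff; a guess at its shape:

```go
package main

import "fmt"

// getPointerValue (sketch): unwrap common pointer types so the
// data-type switch sees the underlying value; nil pointers and
// unhandled types fall through unchanged
func getPointerValue(v interface{}) interface{} {
	switch x := v.(type) {
	case *string:
		if x != nil {
			return *x
		}
	case *float64:
		if x != nil {
			return *x
		}
	case *int64:
		if x != nil {
			return *x
		}
	case *bool:
		if x != nil {
			return *x
		}
	}
	return v
}

func main() {
	s := "error"
	fmt.Println(getPointerValue(&s)) // "error", not a pointer address
	fmt.Println(getPointerValue(7))  // non-pointers pass through
}
```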