From f3cb3b9840cdadf8a21b1ee8f2d68ecd31b5beed Mon Sep 17 00:00:00 2001
From: Shaheer Kochai
Date: Wed, 11 Sep 2024 18:14:22 +0430
Subject: [PATCH 01/79] fix: loading and no-data states showing in loading
 state of alert edit/overview (#5887)

---
 frontend/src/container/FormAlertRules/ChartPreview/index.tsx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/frontend/src/container/FormAlertRules/ChartPreview/index.tsx b/frontend/src/container/FormAlertRules/ChartPreview/index.tsx
index 03db8a362e..add24106e4 100644
--- a/frontend/src/container/FormAlertRules/ChartPreview/index.tsx
+++ b/frontend/src/container/FormAlertRules/ChartPreview/index.tsx
@@ -260,7 +260,7 @@ function ChartPreview({
 				)}
-			{chartData && !queryResponse.isError && (
+			{chartData && !queryResponse.isError && !queryResponse.isLoading && (

Date: Wed, 11 Sep 2024 18:16:41 +0430
Subject: [PATCH 02/79] feat: alert history feedback changes (#5903)

* fix: make the default offset 0

* chore: add beta tag to alert history

* fix: don't add 5 minutes earlier to the timeline graph data

---
 .../src/container/AlertHistory/Timeline/Graph/Graph.tsx | 5 +----
 frontend/src/pages/AlertDetails/hooks.tsx               | 4 +++-
 frontend/src/periscope/components/BetaTag/BetaTag.tsx   | 9 +++++++++
 3 files changed, 13 insertions(+), 5 deletions(-)
 create mode 100644 frontend/src/periscope/components/BetaTag/BetaTag.tsx

diff --git a/frontend/src/container/AlertHistory/Timeline/Graph/Graph.tsx b/frontend/src/container/AlertHistory/Timeline/Graph/Graph.tsx
index a0534691df..5adf1c481a 100644
--- a/frontend/src/container/AlertHistory/Timeline/Graph/Graph.tsx
+++ b/frontend/src/container/AlertHistory/Timeline/Graph/Graph.tsx
@@ -26,17 +26,14 @@ function HorizontalTimelineGraph({
 		return [[], []];
 	}
 
-	// add a first and last entry to make sure the graph displays all the data
-	const FIVE_MINUTES_IN_SECONDS = 300;
+	// add an entry for the end time of the last entry to make sure the graph displays all the data
 	const timestamps = [
-		data[0].start / 1000 - FIVE_MINUTES_IN_SECONDS, // 5 minutes before the first entry
 		...data.map((item) => item.start / 1000),
 		data[data.length - 1].end / 1000, // end value of last entry
 	];
 
 	const states = [
-		ALERT_STATUS[data[0].state], // Same state as the first entry
 		...data.map((item) => ALERT_STATUS[item.state]),
 		ALERT_STATUS[data[data.length - 1].state], // Same state as the last entry
 	];

diff --git a/frontend/src/pages/AlertDetails/hooks.tsx b/frontend/src/pages/AlertDetails/hooks.tsx
index fc6219b195..c6f7b64d64 100644
--- a/frontend/src/pages/AlertDetails/hooks.tsx
+++ b/frontend/src/pages/AlertDetails/hooks.tsx
@@ -26,6 +26,7 @@ import history from 'lib/history';
 import { History, Table } from 'lucide-react';
 import EditRules from 'pages/EditRules';
 import { OrderPreferenceItems } from 'pages/Logs/config';
+import BetaTag from 'periscope/components/BetaTag/BetaTag';
 import PaginationInfoText from 'periscope/components/PaginationInfoText/PaginationInfoText';
 import { useAlertRule } from 'providers/Alert';
 import { useCallback, useMemo } from 'react';
@@ -125,6 +126,7 @@ export const useRouteTabUtils = (): { routes: TabRoutes[] } => {
 						History
+						<BetaTag />
 					</div>
 				),
 				route: getRouteUrl(AlertDetailsTab.HISTORY),
@@ -256,7 +258,7 @@ export const useGetAlertRuleDetailsTimelineTable = (): GetAlertRuleDetailsTimeli
 	const { updatedOrder, offset } = useMemo(
 		() => ({
 			updatedOrder: params.get(urlKey.order) ?? OrderPreferenceItems.ASC,
-			offset: parseInt(params.get(urlKey.offset) ?? '1', 10),
+			offset: parseInt(params.get(urlKey.offset) ?? '0', 10),
 		}),
 		[params],
 	);

diff --git a/frontend/src/periscope/components/BetaTag/BetaTag.tsx b/frontend/src/periscope/components/BetaTag/BetaTag.tsx
new file mode 100644
index 0000000000..55a4e79d58
--- /dev/null
+++ b/frontend/src/periscope/components/BetaTag/BetaTag.tsx
@@ -0,0 +1,9 @@
+import { Tag } from 'antd';
+
+export default function BetaTag(): JSX.Element {
+	return (
+		<Tag>
+			Beta
+		</Tag>
+	);
+}

From d6b75d76ca89f1d2fd68b3d0e737fafdabf65844 Mon Sep 17 00:00:00 2001
From: Shaheer Kochai
Date: Wed, 11 Sep 2024 19:02:17 +0430
Subject: [PATCH 03/79] fix: add support for long texts in alert history page
 (#5895)

---
 .../AlertDetails/AlertHeader/AlertHeader.tsx     |  5 +-
 .../KeyValueLabel/KeyValueLabel.tsx              | 25 +++++++--
 .../LineClampedText.styles.scss                  |  6 +++
 .../LineClampedText/LineClampedText.tsx          | 52 +++++++++++++++++++
 .../components/TrimmedText/TrimmedText.tsx       | 30 +++++++++++
 5 files changed, 114 insertions(+), 4 deletions(-)
 create mode 100644 frontend/src/periscope/components/LineClampedText/LineClampedText.styles.scss
 create mode 100644 frontend/src/periscope/components/LineClampedText/LineClampedText.tsx
 create mode 100644 frontend/src/periscope/components/TrimmedText/TrimmedText.tsx

diff --git a/frontend/src/pages/AlertDetails/AlertHeader/AlertHeader.tsx b/frontend/src/pages/AlertDetails/AlertHeader/AlertHeader.tsx
index f4ff7b933b..073b84382b 100644
--- a/frontend/src/pages/AlertDetails/AlertHeader/AlertHeader.tsx
+++ b/frontend/src/pages/AlertDetails/AlertHeader/AlertHeader.tsx
@@ -1,5 +1,6 @@
 import './AlertHeader.styles.scss';
 
+import LineClampedText from 'periscope/components/LineClampedText/LineClampedText';
 import { useAlertRule } from 'providers/Alert';
 import { useEffect, useMemo } from 'react';
 
@@ -42,7 +43,9 @@ function AlertHeader({ alertDetails }: AlertHeaderProps): JSX.Element {
-					{alert}
+					<LineClampedText
+						text={alert}
+					/>

diff --git a/frontend/src/periscope/components/KeyValueLabel/KeyValueLabel.tsx b/frontend/src/periscope/components/KeyValueLabel/KeyValueLabel.tsx
index aa14dd6380..377c647a3f 100644
--- a/frontend/src/periscope/components/KeyValueLabel/KeyValueLabel.tsx
+++ b/frontend/src/periscope/components/KeyValueLabel/KeyValueLabel.tsx
@@ -1,18 +1,37 @@
 import './KeyValueLabel.styles.scss';
 
-type KeyValueLabelProps = { badgeKey: string; badgeValue: string };
+import { Tooltip } from 'antd';
+
+import TrimmedText from '../TrimmedText/TrimmedText';
+
+type KeyValueLabelProps = {
+	badgeKey: string;
+	badgeValue: string;
+	maxCharacters?: number;
+};
 
 export default function KeyValueLabel({
 	badgeKey,
 	badgeValue,
+	maxCharacters = 20,
 }: KeyValueLabelProps): JSX.Element | null {
 	if (!badgeKey || !badgeValue) {
 		return null;
 	}
 	return (
 		<div className="key-value-label">
-			<div className="key-value-label__key">{badgeKey}</div>
-			<div className="key-value-label__value">{badgeValue}</div>
+			<div className="key-value-label__key">
+				<TrimmedText text={badgeKey} maxCharacters={maxCharacters} />
+			</div>
+			<Tooltip title={badgeValue}>
+				<div className="key-value-label__value">
+					<TrimmedText text={badgeValue} maxCharacters={maxCharacters} />
+				</div>
+			</Tooltip>
 		</div>
 	);
 }
+
+KeyValueLabel.defaultProps = {
+	maxCharacters: 20,
+};

diff --git a/frontend/src/periscope/components/LineClampedText/LineClampedText.styles.scss b/frontend/src/periscope/components/LineClampedText/LineClampedText.styles.scss
new file mode 100644
index 0000000000..644ddc480b
--- /dev/null
+++ b/frontend/src/periscope/components/LineClampedText/LineClampedText.styles.scss
@@ -0,0 +1,6 @@
+.line-clamped-text {
+	display: -webkit-box;
+	-webkit-box-orient: vertical;
+	overflow: hidden;
+	text-overflow: ellipsis;
+}

diff --git a/frontend/src/periscope/components/LineClampedText/LineClampedText.tsx b/frontend/src/periscope/components/LineClampedText/LineClampedText.tsx
new file mode 100644
index 0000000000..6b90704b0c
--- /dev/null
+++ b/frontend/src/periscope/components/LineClampedText/LineClampedText.tsx
@@ -0,0 +1,52 @@
+import './LineClampedText.styles.scss';
+
+import { Tooltip } from 'antd';
+import { useEffect, useRef, useState } from 'react';
+
+function LineClampedText({
+	text,
+	lines,
+}: {
+	text: string;
+	lines?: number;
+}): JSX.Element {
+	const [isOverflowing, setIsOverflowing] = useState(false);
+	const textRef = useRef<HTMLDivElement>(null);
+
+	useEffect(() => {
+		const checkOverflow = (): void => {
+			if (textRef.current) {
+				setIsOverflowing(
+					textRef.current.scrollHeight > textRef.current.clientHeight,
+				);
+			}
+		};
+
+		checkOverflow();
+		window.addEventListener('resize', checkOverflow);
+
+		return (): void => {
+			window.removeEventListener('resize', checkOverflow);
+		};
+	}, [text, lines]);
+
+	const content = (
+		<div
+			ref={textRef}
+			className="line-clamped-text"
+			style={{
+				WebkitLineClamp: lines,
+			}}
+		>
+			{text}
+		</div>
+	);
+
+	return isOverflowing ? <Tooltip title={text}>{content}</Tooltip> : content;
+}
+
+LineClampedText.defaultProps = {
+	lines: 1,
+};
+
+export default LineClampedText;

diff --git a/frontend/src/periscope/components/TrimmedText/TrimmedText.tsx b/frontend/src/periscope/components/TrimmedText/TrimmedText.tsx
new file mode 100644
index 0000000000..10919f2e3f
--- /dev/null
+++ b/frontend/src/periscope/components/TrimmedText/TrimmedText.tsx
@@ -0,0 +1,30 @@
+import { Tooltip } from 'antd';
+import { useEffect, useState } from 'react';
+
+function TrimmedText({
+	text,
+	maxCharacters,
+}: {
+	text: string;
+	maxCharacters: number;
+}): JSX.Element {
+	const [displayText, setDisplayText] = useState(text);
+
+	useEffect(() => {
+		if (text.length > maxCharacters) {
+			setDisplayText(`${text.slice(0, maxCharacters)}...`);
+		} else {
+			setDisplayText(text);
+		}
+	}, [text, maxCharacters]);
+
+	return text.length > maxCharacters ? (
+		<Tooltip title={text}>
+			<span>{displayText}</span>
+		</Tooltip>
+	) : (
+		<span>{displayText}</span>
+	);
+}
+
+export default TrimmedText;

From 20ac75e3d23c8edba06e4c3518641d31b6492d3b Mon Sep 17 00:00:00 2001
From: Srikanth Chekuri
Date: Thu, 12 Sep 2024 00:57:48 +0530
Subject: [PATCH 04/79] chore: json logs for collector (#5240)

---
 deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml | 2 ++
 deploy/docker/clickhouse-setup/otel-collector-config.yaml       | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml b/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml
index 091d4f504b..afa8291358 100644
--- a/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml
+++ b/deploy/docker-swarm/clickhouse-setup/otel-collector-config.yaml
@@ -154,6 +154,8 @@ extensions:
 
 service:
   telemetry:
+    logs:
+      encoding: json
     metrics:
       address: 0.0.0.0:8888
   extensions: [health_check, zpages, pprof]

diff --git a/deploy/docker/clickhouse-setup/otel-collector-config.yaml b/deploy/docker/clickhouse-setup/otel-collector-config.yaml
index e6d4c2418e..6f30d42ad1 100644
--- a/deploy/docker/clickhouse-setup/otel-collector-config.yaml
+++ b/deploy/docker/clickhouse-setup/otel-collector-config.yaml
@@ -158,6 +158,8 @@ exporters:
 
 service:
   telemetry:
+    logs:
+      encoding: json
     metrics:
       address: 0.0.0.0:8888
   extensions:

From 6e7f04b4922f8112f3d4a7ced1ad44683a5cc4da Mon Sep 17 00:00:00 2001
From: Nityananda Gohain
Date: Thu, 12 Sep 2024 09:48:09 +0530
Subject: [PATCH 05/79] logs v4 qb refactor (#5908)

* feat: logsV4 initial refactoring

* feat: filter_query builder with tests added

* feat: all functions of v4 refactored

* fix: tests fixed

* fix: update select for table panel

* fix: tests updated with better examples of limit and group by

* fix: resource filter support in live tail

---------

Co-authored-by: Srikanth Chekuri
---
 pkg/query-service/app/logs/v3/json_filter.go |   24 +-
 .../app/logs/v3/json_filter_test.go          |    2 +-
 .../app/logs/v3/query_builder.go             |   56 +-
 pkg/query-service/app/logs/v4/json_filter.go |  105 ++
 .../app/logs/v4/json_filter_test.go          |  200 +++
 .../app/logs/v4/query_builder.go             |  507 ++++++++
 .../app/logs/v4/query_builder_test.go        | 1099 +++++++++++++++++
 .../app/logs/v4/resource_query_builder.go    |   13 +-
 .../logs/v4/resource_query_builder_test.go   |    2 +-
 pkg/query-service/constants/constants.go     |   12 +
 pkg/query-service/model/v3/v3.go             |    6 +
 pkg/query-service/utils/format.go            |   22 +
 12 files changed, 2002 insertions(+), 46 deletions(-)
 create mode 100644 pkg/query-service/app/logs/v4/json_filter.go
 create mode 100644 pkg/query-service/app/logs/v4/json_filter_test.go
 create mode 100644 pkg/query-service/app/logs/v4/query_builder_test.go
diff --git a/pkg/query-service/app/logs/v3/json_filter.go b/pkg/query-service/app/logs/v3/json_filter.go
index 887baaab4c..d883b61797 100644
--- a/pkg/query-service/app/logs/v3/json_filter.go
+++ b/pkg/query-service/app/logs/v3/json_filter.go
@@ -20,7 +20,7 @@ const (
 	NGRAM_SIZE = 4
 )
 
-var dataTypeMapping = map[string]string{
+var DataTypeMapping = map[string]string{
 	"string":  STRING,
 	"int64":   INT64,
 	"float64": FLOAT64,
@@ -31,7 +31,7 @@ var dataTypeMapping = map[string]string{
 	"array(bool)": ARRAY_BOOL,
 }
 
-var arrayValueTypeMapping = map[string]string{
+var ArrayValueTypeMapping = map[string]string{
 	"array(string)":  "string",
 	"array(int64)":   "int64",
 	"array(float64)": "float64",
@@ -59,7 +59,7 @@ var jsonLogOperators = map[v3.FilterOperator]string{
 	v3.FilterOperatorNotHas: "NOT has(%s, %s)",
 }
 
-func getPath(keyArr []string) string {
+func GetPath(keyArr []string) string {
 	path := []string{}
 	for i := 0; i < len(keyArr); i++ {
 		if strings.HasSuffix(keyArr[i], "[*]") {
@@ -71,7 +71,7 @@ func getPath(keyArr []string) string {
 	return strings.Join(path, ".")
 }
 
-func getJSONFilterKey(key v3.AttributeKey, op v3.FilterOperator, isArray bool) (string, error) {
+func GetJSONFilterKey(key v3.AttributeKey, op v3.FilterOperator, isArray bool) (string, error) {
 	keyArr := strings.Split(key.Key, ".")
 	// i.e it should be at least body.name, and not something like body
 	if len(keyArr) < 2 {
@@ -89,11 +89,11 @@ func getJSONFilterKey(key v3.AttributeKey, op v3.FilterOperator, isArray bool) (
 
 	var dataType string
 	var ok bool
-	if dataType, ok = dataTypeMapping[string(key.DataType)]; !ok {
+	if dataType, ok = DataTypeMapping[string(key.DataType)]; !ok {
 		return "", fmt.Errorf("unsupported dataType for JSON: %s", key.DataType)
 	}
 
-	path := getPath(keyArr[1:])
+	path := GetPath(keyArr[1:])
 
 	if isArray {
 		return fmt.Sprintf("JSONExtract(JSON_QUERY(%s, '$.%s'), '%s')", keyArr[0], path, dataType), nil
@@ -109,7 +109,7 @@ func getJSONFilterKey(key v3.AttributeKey, op v3.FilterOperator, isArray bool) (
 }
 
 // takes the path and the values and generates where clauses for better usage of index
-func getPathIndexFilter(path string) string {
+func GetPathIndexFilter(path string) string {
 	filters := []string{}
 	keyArr := strings.Split(path, ".")
 	if len(keyArr) < 2 {
@@ -136,7 +136,7 @@ func GetJSONFilter(item v3.FilterItem) (string, error) {
 	dataType := item.Key.DataType
 	isArray := false
 	// check if its an array and handle it
-	if val, ok := arrayValueTypeMapping[string(item.Key.DataType)]; ok {
+	if val, ok := ArrayValueTypeMapping[string(item.Key.DataType)]; ok {
 		if item.Operator != v3.FilterOperatorHas && item.Operator != v3.FilterOperatorNotHas {
 			return "", fmt.Errorf("only has operator is supported for array")
 		}
@@ -144,7 +144,7 @@ func GetJSONFilter(item v3.FilterItem) (string, error) {
 		dataType = v3.AttributeKeyDataType(val)
 	}
 
-	key, err := getJSONFilterKey(item.Key, item.Operator, isArray)
+	key, err := GetJSONFilterKey(item.Key, item.Operator, isArray)
 	if err != nil {
 		return "", err
 	}
@@ -164,7 +164,7 @@ func GetJSONFilter(item v3.FilterItem) (string, error) {
 	if logsOp, ok := jsonLogOperators[op]; ok {
 		switch op {
 		case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
-			filter = fmt.Sprintf(logsOp, key, getPath(strings.Split(item.Key.Key, ".")[1:]))
+			filter = fmt.Sprintf(logsOp, key, GetPath(strings.Split(item.Key.Key, ".")[1:]))
 		case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex, v3.FilterOperatorHas, v3.FilterOperatorNotHas:
 			fmtVal := utils.ClickHouseFormattedValue(value)
 			filter = fmt.Sprintf(logsOp, key, fmtVal)
@@ -181,7 +181,7 @@ func GetJSONFilter(item v3.FilterItem) (string, error) {
 
 	filters := []string{}
 
-	pathFilter := getPathIndexFilter(item.Key.Key)
+	pathFilter := GetPathIndexFilter(item.Key.Key)
 	if pathFilter != "" {
 		filters = append(filters, pathFilter)
 	}
@@ -196,7 +196,7 @@ func GetJSONFilter(item v3.FilterItem) (string, error) {
 
 	// add exists check for non array items as default values of int/float/bool will corrupt the results
 	if !isArray && !(item.Operator == v3.FilterOperatorExists || item.Operator == v3.FilterOperatorNotExists) {
-		existsFilter := fmt.Sprintf("JSON_EXISTS(body, '$.%s')", getPath(strings.Split(item.Key.Key, ".")[1:]))
+		existsFilter := fmt.Sprintf("JSON_EXISTS(body, '$.%s')", GetPath(strings.Split(item.Key.Key, ".")[1:]))
 		filter = fmt.Sprintf("%s AND %s", existsFilter, filter)
 	}
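The renames above (getPath, getJSONFilterKey, getPathIndexFilter and the two type maps becoming exported) exist so the new v4 builder can reuse the v3 JSON-filter logic instead of duplicating it. A minimal sketch of calling the exported API from another package — the filter values here are made up for illustration, and it assumes the go.signoz.io/signoz module resolves on the import path:

    package main

    import (
        "fmt"

        logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
        v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
    )

    func main() {
        // body.message is a JSON field inside the log body; GetJSONFilter
        // compiles the filter item into a ClickHouse WHERE fragment,
        // including the lower(body) LIKE prefilter shown in the code above.
        filter, err := logsV3.GetJSONFilter(v3.FilterItem{
            Key: v3.AttributeKey{
                Key:      "body.message",
                DataType: v3.AttributeKeyDataTypeString,
                IsJSON:   true,
            },
            Operator: v3.FilterOperatorContains,
            Value:    "error",
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(filter)
    }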
diff --git a/pkg/query-service/app/logs/v3/json_filter_test.go b/pkg/query-service/app/logs/v3/json_filter_test.go
index 0a71cd67b2..060ba63707 100644
--- a/pkg/query-service/app/logs/v3/json_filter_test.go
+++ b/pkg/query-service/app/logs/v3/json_filter_test.go
@@ -140,7 +140,7 @@ var testGetJSONFilterKeyData = []struct {
 func TestGetJSONFilterKey(t *testing.T) {
 	for _, tt := range testGetJSONFilterKeyData {
 		Convey("testgetKey", t, func() {
-			columnName, err := getJSONFilterKey(tt.Key, tt.Operator, tt.IsArray)
+			columnName, err := GetJSONFilterKey(tt.Key, tt.Operator, tt.IsArray)
 			if tt.Error {
 				So(err, ShouldNotBeNil)
 			} else {

diff --git a/pkg/query-service/app/logs/v3/query_builder.go b/pkg/query-service/app/logs/v3/query_builder.go
index 2aa56002ff..bd64b4d0e6 100644
--- a/pkg/query-service/app/logs/v3/query_builder.go
+++ b/pkg/query-service/app/logs/v3/query_builder.go
@@ -9,7 +9,7 @@ import (
 	"go.signoz.io/signoz/pkg/query-service/utils"
 )
 
-var aggregateOperatorToPercentile = map[v3.AggregateOperator]float64{
+var AggregateOperatorToPercentile = map[v3.AggregateOperator]float64{
 	v3.AggregateOperatorP05: 0.05,
 	v3.AggregateOperatorP10: 0.10,
 	v3.AggregateOperatorP20: 0.20,
@@ -21,7 +21,7 @@ var aggregateOperatorToPercentile = map[v3.AggregateOperator]float64{
 	v3.AggregateOperatorP99: 0.99,
 }
 
-var aggregateOperatorToSQLFunc = map[v3.AggregateOperator]string{
+var AggregateOperatorToSQLFunc = map[v3.AggregateOperator]string{
 	v3.AggregateOperatorAvg: "avg",
 	v3.AggregateOperatorMax: "max",
 	v3.AggregateOperatorMin: "min",
@@ -53,7 +53,7 @@ var logOperators = map[v3.FilterOperator]string{
 
 const BODY = "body"
 
-func getClickhouseLogsColumnType(columnType v3.AttributeKeyType) string {
+func GetClickhouseLogsColumnType(columnType v3.AttributeKeyType) string {
 	if columnType == v3.AttributeKeyTypeTag {
 		return "attributes"
 	}
@@ -83,7 +83,7 @@ func getClickhouseColumnName(key v3.AttributeKey) string {
 	//if the key is present in the topLevelColumn then it will be only searched in those columns,
 	//regardless if it is indexed/present again in resource or column attribute
 	if !key.IsColumn {
-		columnType := getClickhouseLogsColumnType(key.Type)
+		columnType := GetClickhouseLogsColumnType(key.Type)
 		columnDataType := getClickhouseLogsColumnDataType(key.DataType)
 		clickhouseColumn = fmt.Sprintf("%s_%s_value[indexOf(%s_%s_key, '%s')]", columnType, columnDataType, columnType, columnDataType, key.Key)
 		return clickhouseColumn
@@ -114,7 +114,7 @@ func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.Attri
 	return selectLabels
 }
 
-func getSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
+func GetSelectKeys(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
 	var selectLabels []string
 	if aggregatorOperator == v3.AggregateOperatorNoOp {
 		return ""
@@ -154,7 +154,7 @@ func GetExistsNexistsFilter(op v3.FilterOperator, item v3.FilterItem) string {
 		}
 		return fmt.Sprintf("%s_exists`=%v", strings.TrimSuffix(getClickhouseColumnName(item.Key), "`"), val)
 	}
-	columnType := getClickhouseLogsColumnType(item.Key.Type)
+	columnType := GetClickhouseLogsColumnType(item.Key.Type)
 	columnDataType := getClickhouseLogsColumnDataType(item.Key.DataType)
 	return fmt.Sprintf(logOperators[op], columnType, columnDataType, item.Key.Key)
 }
@@ -224,7 +224,7 @@ func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey,
 	// add group by conditions to filter out log lines which doesn't have the key
 	for _, attr := range groupBy {
 		if !attr.IsColumn {
-			columnType := getClickhouseLogsColumnType(attr.Type)
+			columnType := GetClickhouseLogsColumnType(attr.Type)
 			columnDataType := getClickhouseLogsColumnDataType(attr.DataType)
 			conditions = append(conditions, fmt.Sprintf("has(%s_%s_key, '%s')", columnType, columnDataType, attr.Key))
 		} else if attr.Type != v3.AttributeKeyTypeUnspecified {
@@ -258,7 +258,7 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
 
 	selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy)
 
-	having := having(mq.Having)
+	having := Having(mq.Having)
 	if having != "" {
 		having = " having " + having
 	}
@@ -288,10 +288,10 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
 	// we dont need value for first query
 	// going with this route as for a cleaner approach on implementation
 	if graphLimitQtype == constants.FirstQueryGraphLimit {
-		queryTmpl = "SELECT " + getSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
+		queryTmpl = "SELECT " + GetSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + queryTmpl + ")"
 	}
 
-	groupBy := groupByAttributeKeyTags(panelType, graphLimitQtype, mq.GroupBy...)
+	groupBy := GroupByAttributeKeyTags(panelType, graphLimitQtype, mq.GroupBy...)
 	if panelType != v3.PanelTypeList && groupBy != "" {
 		groupBy = " group by " + groupBy
 	}
@@ -301,7 +301,7 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
 	}
 
 	if graphLimitQtype == constants.SecondQueryGraphLimit {
-		filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", getSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "#LIMIT_PLACEHOLDER)"
+		filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "#LIMIT_PLACEHOLDER)"
 	}
 
 	aggregationKey := ""
@@ -329,7 +329,7 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
 		rate = rate / 60.0
 	}
 
-	op := fmt.Sprintf("%s(%s)/%f", aggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey, rate)
+	op := fmt.Sprintf("%s(%s)/%f", AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey, rate)
 	query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
 	return query, nil
 case
@@ -342,11 +342,11 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
 	v3.AggregateOperatorP90,
 	v3.AggregateOperatorP95,
 	v3.AggregateOperatorP99:
-	op := fmt.Sprintf("quantile(%v)(%s)", aggregateOperatorToPercentile[mq.AggregateOperator], aggregationKey)
+	op := fmt.Sprintf("quantile(%v)(%s)", AggregateOperatorToPercentile[mq.AggregateOperator], aggregationKey)
 	query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
 	return query, nil
 case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax:
-	op := fmt.Sprintf("%s(%s)", aggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey)
+	op := fmt.Sprintf("%s(%s)", AggregateOperatorToSQLFunc[mq.AggregateOperator], aggregationKey)
 	query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
 	return query, nil
 case v3.AggregateOperatorCount:
@@ -394,7 +394,7 @@ func groupBy(panelType v3.PanelType, graphLimitQtype string, tags ...string) str
 	return strings.Join(tags, ",")
 }
 
-func groupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tags ...v3.AttributeKey) string {
+func GroupByAttributeKeyTags(panelType v3.PanelType, graphLimitQtype string, tags ...v3.AttributeKey) string {
 	groupTags := []string{}
 	for _, tag := range tags {
 		groupTags = append(groupTags, "`"+tag.Key+"`")
@@ -446,7 +446,7 @@ func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []
 	return str
 }
 
-func having(items []v3.Having) string {
+func Having(items []v3.Having) string {
 	// aggregate something and filter on that aggregate
 	var having []string
 	for _, item := range items {
@@ -455,7 +455,7 @@ func having(items []v3.Having) string {
 	return strings.Join(having, " AND ")
 }
 
-func reduceQuery(query string, reduceTo v3.ReduceToOperator, aggregateOperator v3.AggregateOperator) (string, error) {
+func ReduceQuery(query string, reduceTo v3.ReduceToOperator, aggregateOperator v3.AggregateOperator) (string, error) {
 	// the timestamp picked is not relevant here since the final value used is show the single
 	// chart with just the query value.
 	switch reduceTo {
@@ -475,14 +475,14 @@
 	return query, nil
 }
 
-func addLimitToQuery(query string, limit uint64) string {
+func AddLimitToQuery(query string, limit uint64) string {
 	if limit == 0 {
 		return query
 	}
 	return fmt.Sprintf("%s LIMIT %d", query, limit)
 }
 
-func addOffsetToQuery(query string, offset uint64) string {
+func AddOffsetToQuery(query string, offset uint64) string {
 	return fmt.Sprintf("%s OFFSET %d", query, offset)
 }
 
@@ -492,7 +492,7 @@ type Options struct {
 	PreferRPM bool
 }
 
-func isOrderByTs(orderBy []v3.OrderBy) bool {
+func IsOrderByTs(orderBy []v3.OrderBy) bool {
 	if len(orderBy) == 1 && (orderBy[0].Key == constants.TIMESTAMP || orderBy[0].ColumnName == constants.TIMESTAMP) {
 		return true
 	}
@@ -523,7 +523,7 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan
 		if err != nil {
 			return "", err
 		}
-		query = addLimitToQuery(query, mq.Limit)
+		query = AddLimitToQuery(query, mq.Limit)
 
 		return query, nil
 	} else if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
@@ -539,7 +539,7 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan
 		return "", err
 	}
 	if panelType == v3.PanelTypeValue {
-		query, err = reduceQuery(query, mq.ReduceTo, mq.AggregateOperator)
+		query, err = ReduceQuery(query, mq.ReduceTo, mq.AggregateOperator)
 	}
 
 	if panelType == v3.PanelTypeList {
@@ -550,21 +550,21 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan
 
 		if mq.PageSize > 0 {
 			if mq.Limit > 0 && mq.Offset+mq.PageSize > mq.Limit {
-				query = addLimitToQuery(query, mq.Limit-mq.Offset)
+				query = AddLimitToQuery(query, mq.Limit-mq.Offset)
 			} else {
-				query = addLimitToQuery(query, mq.PageSize)
+				query = AddLimitToQuery(query, mq.PageSize)
 			}
 
 			// add offset to the query only if it is not orderd by timestamp.
-			if !isOrderByTs(mq.OrderBy) {
-				query = addOffsetToQuery(query, mq.Offset)
+			if !IsOrderByTs(mq.OrderBy) {
+				query = AddOffsetToQuery(query, mq.Offset)
 			}
 
 		} else {
-			query = addLimitToQuery(query, mq.Limit)
+			query = AddLimitToQuery(query, mq.Limit)
 		}
 	} else if panelType == v3.PanelTypeTable {
-		query = addLimitToQuery(query, mq.Limit)
+		query = AddLimitToQuery(query, mq.Limit)
 	}
 
 	return query, err
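The pagination helpers are exported here for the same reason: v4's PrepareLogsQuery (later in this patch) composes them exactly as v3 does — a LIMIT first, and an OFFSET only when the result is not ordered by timestamp (timestamp-ordered lists paginate by cursor instead). A rough illustration of that composition; the query string is a placeholder, not output of the builder:

    package main

    import (
        "fmt"

        logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
        v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
    )

    func main() {
        query := "SELECT timestamp, body FROM signoz_logs.distributed_logs_v2" // placeholder
        orderBy := []v3.OrderBy{{ColumnName: "timestamp", Order: "desc"}}

        query = logsV3.AddLimitToQuery(query, 100)
        // ordered by timestamp -> no OFFSET; pagination happens via the cursor
        if !logsV3.IsOrderByTs(orderBy) {
            query = logsV3.AddOffsetToQuery(query, 100)
        }
        fmt.Println(query)
    }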
diff --git a/pkg/query-service/app/logs/v4/json_filter.go b/pkg/query-service/app/logs/v4/json_filter.go
new file mode 100644
index 0000000000..cde88e748a
--- /dev/null
+++ b/pkg/query-service/app/logs/v4/json_filter.go
@@ -0,0 +1,105 @@
+package v4
+
+import (
+	"fmt"
+	"strings"
+
+	logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
+	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+	"go.signoz.io/signoz/pkg/query-service/utils"
+)
+
+var jsonLogOperators = map[v3.FilterOperator]string{
+	v3.FilterOperatorEqual:           "=",
+	v3.FilterOperatorNotEqual:        "!=",
+	v3.FilterOperatorLessThan:        "<",
+	v3.FilterOperatorLessThanOrEq:    "<=",
+	v3.FilterOperatorGreaterThan:     ">",
+	v3.FilterOperatorGreaterThanOrEq: ">=",
+	v3.FilterOperatorLike:            "LIKE",
+	v3.FilterOperatorNotLike:         "NOT LIKE",
+	v3.FilterOperatorContains:        "LIKE",
+	v3.FilterOperatorNotContains:     "NOT LIKE",
+	v3.FilterOperatorRegex:           "match(%s, %s)",
+	v3.FilterOperatorNotRegex:        "NOT match(%s, %s)",
+	v3.FilterOperatorIn:              "IN",
+	v3.FilterOperatorNotIn:           "NOT IN",
+	v3.FilterOperatorExists:          "JSON_EXISTS(%s, '$.%s')",
+	v3.FilterOperatorNotExists:       "NOT JSON_EXISTS(%s, '$.%s')",
+	v3.FilterOperatorHas:             "has(%s, %s)",
+	v3.FilterOperatorNotHas:          "NOT has(%s, %s)",
+}
+
+func GetJSONFilter(item v3.FilterItem) (string, error) {
+
+	dataType := item.Key.DataType
+	isArray := false
+	// check if it's an array and handle it
+	if val, ok := logsV3.ArrayValueTypeMapping[string(item.Key.DataType)]; ok {
+		if item.Operator != v3.FilterOperatorHas && item.Operator != v3.FilterOperatorNotHas {
+			return "", fmt.Errorf("only has operator is supported for array")
+		}
+		isArray = true
+		dataType = v3.AttributeKeyDataType(val)
+	}
+
+	key, err := logsV3.GetJSONFilterKey(item.Key, item.Operator, isArray)
+	if err != nil {
+		return "", err
+	}
+
+	// non array
+	op := v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
+
+	var value interface{}
+	if op != v3.FilterOperatorExists && op != v3.FilterOperatorNotExists {
+		value, err = utils.ValidateAndCastValue(item.Value, dataType)
+		if err != nil {
+			return "", fmt.Errorf("failed to validate and cast value for %s: %v", item.Key.Key, err)
+		}
+	}
+
+	var filter string
+	if logsOp, ok := jsonLogOperators[op]; ok {
+		switch op {
+		case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
+			filter = fmt.Sprintf(logsOp, key, logsV3.GetPath(strings.Split(item.Key.Key, ".")[1:]))
+		case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex, v3.FilterOperatorHas, v3.FilterOperatorNotHas:
+			fmtVal := utils.ClickHouseFormattedValue(value)
+			filter = fmt.Sprintf(logsOp, key, fmtVal)
+		case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
+			val := utils.QuoteEscapedString(fmt.Sprintf("%v", item.Value))
+			filter = fmt.Sprintf("%s %s '%%%s%%'", key, logsOp, val)
+		default:
+			fmtVal := utils.ClickHouseFormattedValue(value)
+			filter = fmt.Sprintf("%s %s %s", key, logsOp, fmtVal)
+		}
+	} else {
+		return "", fmt.Errorf("unsupported operator: %s", op)
+	}
+
+	filters := []string{}
+
+	pathFilter := logsV3.GetPathIndexFilter(item.Key.Key)
+	if pathFilter != "" {
+		filters = append(filters, pathFilter)
+	}
+	if op == v3.FilterOperatorContains ||
+		op == v3.FilterOperatorEqual ||
+		op == v3.FilterOperatorHas {
+		val, ok := item.Value.(string)
+		if ok && len(val) >= logsV3.NGRAM_SIZE {
+			filters = append(filters, fmt.Sprintf("lower(body) like lower('%%%s%%')", utils.QuoteEscapedString(strings.ToLower(val))))
+		}
+	}
+
+	// add exists check for non array items as default values of int/float/bool will corrupt the results
+	if !isArray && !(item.Operator == v3.FilterOperatorExists || item.Operator == v3.FilterOperatorNotExists) {
+		existsFilter := fmt.Sprintf("JSON_EXISTS(body, '$.%s')", logsV3.GetPath(strings.Split(item.Key.Key, ".")[1:]))
+		filter = fmt.Sprintf("%s AND %s", existsFilter, filter)
+	}
+
+	filters = append(filters, filter)
+
+	return strings.Join(filters, " AND "), nil
+}

diff --git a/pkg/query-service/app/logs/v4/json_filter_test.go b/pkg/query-service/app/logs/v4/json_filter_test.go
new file mode 100644
index 0000000000..c8b2e44847
--- /dev/null
+++ b/pkg/query-service/app/logs/v4/json_filter_test.go
@@ -0,0 +1,200 @@
+package v4
+
+import (
+	"testing"
+
+	. "github.com/smartystreets/goconvey/convey"
+	logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
+	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+)
+
+var testGetJSONFilterData = []struct {
+	Name       string
+	FilterItem v3.FilterItem
+	Filter     string
+	Error      bool
+}{
+	{
+		Name: "Array membership string",
+		FilterItem: v3.FilterItem{
+			Key: v3.AttributeKey{
+				Key:      "body.requestor_list[*]",
+				DataType: "array(string)",
+				IsJSON:   true,
+			},
+			Operator: "has",
+			Value:    "index_service",
+		},
+		Filter: "lower(body) like lower('%requestor_list%') AND lower(body) like lower('%index_service%') AND has(JSONExtract(JSON_QUERY(body, '$.\"requestor_list\"[*]'), 'Array(String)'), 'index_service')",
+	},
+	{
+		Name: "Array membership int64",
+		FilterItem: v3.FilterItem{
+			Key: v3.AttributeKey{
+				Key:      "body.int_numbers[*]",
+				DataType: "array(int64)",
+				IsJSON:   true,
+			},
+			Operator: "has",
+			Value:    2,
+		},
+		Filter: "lower(body) like lower('%int_numbers%') AND has(JSONExtract(JSON_QUERY(body, '$.\"int_numbers\"[*]'), '" + logsV3.ARRAY_INT64 + "'), 2)",
+	},
+	{
+		Name: "Array membership float64",
+		FilterItem: v3.FilterItem{
+			Key: v3.AttributeKey{
+				Key:      "body.nested_num[*].float_nums[*]",
+				DataType: "array(float64)",
+				IsJSON:   true,
+			},
+			Operator: "nhas",
+			Value:    2.2,
+		},
+		Filter: "lower(body) like lower('%nested_num%float_nums%') AND NOT has(JSONExtract(JSON_QUERY(body, '$.\"nested_num\"[*].\"float_nums\"[*]'), '" + logsV3.ARRAY_FLOAT64 + "'), 2.200000)",
+	},
+	{
+		Name: "Array membership bool",
+		FilterItem: v3.FilterItem{
+			Key: v3.AttributeKey{
+				Key:      "body.bool[*]",
+				DataType: "array(bool)",
+				IsJSON:   true,
+			},
+			Operator: "has",
+			Value:    true,
+		},
+		Filter: "lower(body) like lower('%bool%') AND has(JSONExtract(JSON_QUERY(body, '$.\"bool\"[*]'), '" + logsV3.ARRAY_BOOL + "'), true)",
+	},
+	{
+		Name: "eq operator",
+		FilterItem: v3.FilterItem{
+			Key: v3.AttributeKey{
+				Key:      "body.message",
+				DataType: "string",
+				IsJSON:   true,
+			},
+			Operator: "=",
+			Value:    "hello",
+		},
+		Filter: "lower(body) like lower('%message%') AND lower(body) like lower('%hello%') AND JSON_EXISTS(body, '$.\"message\"') AND JSON_VALUE(body, '$.\"message\"') = 'hello'",
+	},
+	{
+		Name: "eq operator number",
+		FilterItem: v3.FilterItem{
+			Key: v3.AttributeKey{
+				Key:      "body.status",
+				DataType: "int64",
+				IsJSON:   true,
+			},
+			Operator: "=",
+			Value:    1,
+		},
+		Filter: "lower(body) like lower('%status%') AND JSON_EXISTS(body, '$.\"status\"') AND JSONExtract(JSON_VALUE(body, '$.\"status\"'), '" + logsV3.INT64 + "') = 1",
+	},
+	{
+		Name: "neq operator number",
+		FilterItem: v3.FilterItem{
+			Key: v3.AttributeKey{
+				Key:      "body.status",
+				DataType: "float64",
+				IsJSON:   true,
+			},
+			Operator: "=",
+			Value:    1.1,
+		},
+		Filter: "lower(body) like lower('%status%') AND JSON_EXISTS(body, '$.\"status\"') AND JSONExtract(JSON_VALUE(body, '$.\"status\"'), '" + logsV3.FLOAT64 + "') = 1.100000",
+	},
+	{
+		Name: "eq operator bool",
+		FilterItem: v3.FilterItem{
+			Key: v3.AttributeKey{
+				Key:      "body.boolkey",
+				DataType: "bool",
+				IsJSON:   true,
+			},
+			Operator: "=",
+			Value:    true,
+		},
+		Filter: "lower(body) like lower('%boolkey%') AND JSON_EXISTS(body, '$.\"boolkey\"') AND JSONExtract(JSON_VALUE(body, '$.\"boolkey\"'), '" + logsV3.BOOL + "') = true",
+	},
+	{
+		Name: "greater than operator",
+		FilterItem: v3.FilterItem{
+			Key: v3.AttributeKey{
+				Key:      "body.status",
+				DataType: "int64",
+				IsJSON:   true,
+			},
+			Operator: ">",
+			Value:    1,
+		},
+		Filter: "lower(body) like lower('%status%') AND JSON_EXISTS(body, '$.\"status\"') AND JSONExtract(JSON_VALUE(body, '$.\"status\"'), '" + logsV3.INT64 + "') > 1",
+	},
+	{
+		Name: "regex operator",
+		FilterItem: v3.FilterItem{
+			Key: v3.AttributeKey{
+				Key:      "body.message",
+				DataType: "string",
+				IsJSON:   true,
+			},
+			Operator: "regex",
+			Value:    "a*",
+		},
+		Filter: "lower(body) like lower('%message%') AND JSON_EXISTS(body, '$.\"message\"') AND match(JSON_VALUE(body, '$.\"message\"'), 'a*')",
+	},
+	{
+		Name: "contains operator",
+		FilterItem: v3.FilterItem{
+			Key: v3.AttributeKey{
+				Key:      "body.message",
+				DataType: "string",
+				IsJSON:   true,
+			},
+			Operator: "contains",
+			Value:    "a",
+		},
+		Filter: "lower(body) like lower('%message%') AND JSON_EXISTS(body, '$.\"message\"') AND JSON_VALUE(body, '$.\"message\"') LIKE '%a%'",
+	},
+	{
+		Name: "contains operator with quotes",
+		FilterItem: v3.FilterItem{
+			Key: v3.AttributeKey{
+				Key:      "body.message",
+				DataType: "string",
+				IsJSON:   true,
+			},
+			Operator: "contains",
+			Value:    "hello 'world'",
+		},
+		Filter: "lower(body) like lower('%message%') AND lower(body) like lower('%hello \\'world\\'%') AND JSON_EXISTS(body, '$.\"message\"') AND JSON_VALUE(body, '$.\"message\"') LIKE '%hello \\'world\\'%'",
+	},
+	{
+		Name: "exists",
+		FilterItem: v3.FilterItem{
+			Key: v3.AttributeKey{
+				Key:      "body.message",
+				DataType: "string",
+				IsJSON:   true,
+			},
+			Operator: "exists",
+			Value:    "",
+		},
+		Filter: "lower(body) like lower('%message%') AND JSON_EXISTS(body, '$.\"message\"')",
+	},
+}
+
+func TestGetJSONFilter(t *testing.T) {
+	for _, tt := range testGetJSONFilterData {
+		Convey("testGetJSONFilter", t, func() {
+			filter, err := GetJSONFilter(tt.FilterItem)
+			if tt.Error {
+				So(err, ShouldNotBeNil)
+			} else {
+				So(err, ShouldBeNil)
+				So(filter, ShouldEqual, tt.Filter)
+			}
+		})
+	}
+}
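The next diff is the core of the refactor: the v4 builder targets the distributed_logs_v2 table, which is additionally filtered on ts_bucket_start, and it widens the bucket window by 1800 seconds so rows belonging to a bucket that opened before the query window are still scanned. A toy calculation of that time filter, using only the formula and constants from the diff (the timestamps are made up):

    package main

    import "fmt"

    func main() {
        const nanosecond = 1000000000 // NANOSECOND in the v4 package
        start := int64(1726012000000000000) // query start, ns
        end := int64(1726015600000000000)   // query end, ns

        // buckets are keyed by their start second; subtracting 1800 pulls in
        // the bucket that was already open when the window begins
        bucketStart := start/nanosecond - 1800
        bucketEnd := end / nanosecond

        fmt.Printf("(timestamp >= %d AND timestamp <= %d) AND (ts_bucket_start >= %d AND ts_bucket_start <= %d)\n",
            start, end, bucketStart, bucketEnd)
    }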
diff --git a/pkg/query-service/app/logs/v4/query_builder.go b/pkg/query-service/app/logs/v4/query_builder.go
index 08024756bd..b96c5b9113 100644
--- a/pkg/query-service/app/logs/v4/query_builder.go
+++ b/pkg/query-service/app/logs/v4/query_builder.go
@@ -1,7 +1,13 @@
 package v4
 
 import (
+	"fmt"
+	"strings"
+
+	logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3"
+	"go.signoz.io/signoz/pkg/query-service/constants"
 	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+	"go.signoz.io/signoz/pkg/query-service/utils"
 )
 
 var logOperators = map[v3.FilterOperator]string{
@@ -29,3 +35,504 @@ const (
 	DISTRIBUTED_LOGS_V2_RESOURCE = "distributed_logs_v2_resource"
 	NANOSECOND                   = 1000000000
 )
+
+func getClickhouseLogsColumnDataType(columnDataType v3.AttributeKeyDataType) string {
+	if columnDataType == v3.AttributeKeyDataTypeFloat64 || columnDataType == v3.AttributeKeyDataTypeInt64 {
+		return "number"
+	}
+	if columnDataType == v3.AttributeKeyDataTypeBool {
+		return "bool"
+	}
+	return "string"
+}
+
+func getClickhouseKey(key v3.AttributeKey) string {
+	// check if it is a top level static field
+	if _, ok := constants.StaticFieldsLogsV3[key.Key]; ok && key.Type == v3.AttributeKeyTypeUnspecified {
+		return key.Key
+	}
+
+	//if the key is present in the topLevelColumn then it will be only searched in those columns,
+	//regardless if it is indexed/present again in resource or column attribute
+	if !key.IsColumn {
+		columnType := logsV3.GetClickhouseLogsColumnType(key.Type)
+		columnDataType := getClickhouseLogsColumnDataType(key.DataType)
+		return fmt.Sprintf("%s_%s['%s']", columnType, columnDataType, key.Key)
+	}
+
+	// materialized column created from query
+	// https://github.com/SigNoz/signoz/pull/4775
+	return "`" + utils.GetClickhouseColumnNameV2(string(key.Type), string(key.DataType), key.Key) + "`"
+}
+
+func getSelectLabels(aggregatorOperator v3.AggregateOperator, groupBy []v3.AttributeKey) string {
+	var selectLabels string
+	if aggregatorOperator == v3.AggregateOperatorNoOp {
+		selectLabels = ""
+	} else {
+		for _, tag := range groupBy {
+			columnName := getClickhouseKey(tag)
+			selectLabels += fmt.Sprintf(" %s as `%s`,", columnName, tag.Key)
+		}
+	}
+	return selectLabels
+}
+
+func getExistsNexistsFilter(op v3.FilterOperator, item v3.FilterItem) string {
+	if _, ok := constants.StaticFieldsLogsV3[item.Key.Key]; ok && item.Key.Type == v3.AttributeKeyTypeUnspecified {
+		// no exists filter for static fields as they exist everywhere
+		// TODO(nitya): Think what we can do here
+		return ""
+	} else if item.Key.IsColumn {
+		// get filter for materialized columns
+		val := true
+		if op == v3.FilterOperatorNotExists {
+			val = false
+		}
+		return fmt.Sprintf("%s_exists`=%v", strings.TrimSuffix(getClickhouseKey(item.Key), "`"), val)
+	}
+	// filter for non materialized attributes
+	columnType := logsV3.GetClickhouseLogsColumnType(item.Key.Type)
+	columnDataType := getClickhouseLogsColumnDataType(item.Key.DataType)
+	return fmt.Sprintf(logOperators[op], columnType, columnDataType, item.Key.Key)
+}
+
+func buildAttributeFilter(item v3.FilterItem) (string, error) {
+	// check if the user is searching for value in all attributes
+	key := item.Key.Key
+	op := v3.FilterOperator(strings.ToLower(string(item.Operator)))
+
+	var value interface{}
+	var err error
+	if op != v3.FilterOperatorExists && op != v3.FilterOperatorNotExists {
+		value, err = utils.ValidateAndCastValue(item.Value, item.Key.DataType)
+		if err != nil {
+			return "", fmt.Errorf("failed to validate and cast value for %s: %v", item.Key.Key, err)
+		}
+	}
+
+	// TODO(nitya): as of now __attrs only supports attributes_string. Discuss more on this
+	// also for eq and contains, as for now it does an exact match
+	if key == "__attrs" {
+		if (op != v3.FilterOperatorEqual && op != v3.FilterOperatorContains) || item.Key.DataType != v3.AttributeKeyDataTypeString {
+			return "", fmt.Errorf("only = operator and string data type is supported for __attrs")
+		}
+		val := utils.ClickHouseFormattedValue(item.Value)
+		return fmt.Sprintf("has(mapValues(attributes_string), %s)", val), nil
+	}
+
+	keyName := getClickhouseKey(item.Key)
+	fmtVal := utils.ClickHouseFormattedValue(value)
+
+	if logsOp, ok := logOperators[op]; ok {
+		switch op {
+		case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
+			return getExistsNexistsFilter(op, item), nil
+		case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex:
+
+			return fmt.Sprintf(logsOp, keyName, fmtVal), nil
+		case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
+			val := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", item.Value))
+			// for body the contains is case insensitive
+			if keyName == BODY {
+				return fmt.Sprintf("lower(%s) %s lower('%%%s%%')", keyName, logsOp, val), nil
+			} else {
+				return fmt.Sprintf("%s %s '%%%s%%'", keyName, logsOp, val), nil
+			}
+		default:
+			// use lower for like and ilike
+			if op == v3.FilterOperatorLike || op == v3.FilterOperatorNotLike {
+				if keyName == BODY {
+					keyName = fmt.Sprintf("lower(%s)", keyName)
+					fmtVal = fmt.Sprintf("lower(%s)", fmtVal)
+				}
+			}
+			return fmt.Sprintf("%s %s %s", keyName, logsOp, fmtVal), nil
+		}
+	} else {
+		return "", fmt.Errorf("unsupported operator: %s", op)
+	}
+}
+
+func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey, aggregateAttribute v3.AttributeKey) (string, error) {
+	var conditions []string
+
+	if fs == nil || len(fs.Items) == 0 {
+		return "", nil
+	}
+
+	for _, item := range fs.Items {
+		// skip if it's a resource attribute
+		if item.Key.Type == v3.AttributeKeyTypeResource {
+			continue
+		}
+
+		// if the filter is json filter
+		if item.Key.IsJSON {
+			filter, err := GetJSONFilter(item)
+			if err != nil {
+				return "", err
+			}
+			conditions = append(conditions, filter)
+			continue
+		}
+
+		// generate the filter
+		filter, err := buildAttributeFilter(item)
+		if err != nil {
+			return "", err
+		}
+		conditions = append(conditions, filter)
+
+		// add extra condition for map contains
+		// by default clickhouse is not able to utilize indexes for keys with all operators.
+		// mapContains forces the use of index.
+		op := v3.FilterOperator(strings.ToLower(string(item.Operator)))
+		if item.Key.IsColumn == false && op != v3.FilterOperatorExists && op != v3.FilterOperatorNotExists {
+			conditions = append(conditions, getExistsNexistsFilter(v3.FilterOperatorExists, item))
+		}
+	}
+
+	// add group by conditions to filter out log lines which don't have the key
+	for _, attr := range groupBy {
+		// skip if it's a resource attribute
+		if attr.Type == v3.AttributeKeyTypeResource {
+			continue
+		}
+
+		if !attr.IsColumn {
+			columnType := logsV3.GetClickhouseLogsColumnType(attr.Type)
+			columnDataType := getClickhouseLogsColumnDataType(attr.DataType)
+			conditions = append(conditions, fmt.Sprintf("mapContains(%s_%s, '%s')", columnType, columnDataType, attr.Key))
+		} else if attr.Type != v3.AttributeKeyTypeUnspecified {
+			// for materialized columns and not the top level static fields
+			name := utils.GetClickhouseColumnNameV2(string(attr.Type), string(attr.DataType), attr.Key)
+			conditions = append(conditions, fmt.Sprintf("`%s_exists`=true", name))
+		}
+	}
+
+	// add conditions for aggregate attribute
+	if aggregateAttribute.Key != "" && aggregateAttribute.Type != v3.AttributeKeyTypeResource {
+		existsFilter := getExistsNexistsFilter(v3.FilterOperatorExists, v3.FilterItem{Key: aggregateAttribute})
+		conditions = append(conditions, existsFilter)
+	}
+
+	queryString := strings.Join(conditions, " AND ")
+	return queryString, nil
+}
+
+// orderBy returns a string of comma separated tags for order by clause
+// if there are remaining items which are not present in tags they are also added
+// if the order is not specified, it defaults to ASC
+func orderBy(panelType v3.PanelType, items []v3.OrderBy, tagLookup map[string]struct{}) []string {
+	var orderBy []string
+
+	for _, item := range items {
+		if item.ColumnName == constants.SigNozOrderByValue {
+			orderBy = append(orderBy, fmt.Sprintf("value %s", item.Order))
+		} else if _, ok := tagLookup[item.ColumnName]; ok {
+			orderBy = append(orderBy, fmt.Sprintf("`%s` %s", item.ColumnName, item.Order))
+		} else if panelType == v3.PanelTypeList {
+			attr := v3.AttributeKey{Key: item.ColumnName, DataType: item.DataType, Type: item.Type, IsColumn: item.IsColumn}
+			name := getClickhouseKey(attr)
+			if item.IsColumn {
+				name = "`" + name + "`"
+			}
+			orderBy = append(orderBy, fmt.Sprintf("%s %s", name, item.Order))
+		}
+	}
+	return orderBy
+}
+
+func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []v3.AttributeKey) string {
+
+	tagLookup := map[string]struct{}{}
+	for _, v := range tags {
+		tagLookup[v.Key] = struct{}{}
+	}
+
+	orderByArray := orderBy(panelType, items, tagLookup)
+
+	if len(orderByArray) == 0 {
+		if panelType == v3.PanelTypeList {
+			orderByArray = append(orderByArray, constants.TIMESTAMP+" DESC")
+		} else {
+			orderByArray = append(orderByArray, "value DESC")
+		}
+	}
+
+	str := strings.Join(orderByArray, ",")
+	return str
+}
+
+func generateAggregateClause(aggOp v3.AggregateOperator,
+	aggKey string,
+	step int64,
+	preferRPM bool,
+	timeFilter string,
+	whereClause string,
+	groupBy string,
+	having string,
+	orderBy string,
+) (string, error) {
+	queryTmpl := " %s as value from signoz_logs." + DISTRIBUTED_LOGS_V2 +
+		" where " + timeFilter + "%s" +
+		"%s%s" +
+		"%s"
+	switch aggOp {
+	case v3.AggregateOperatorRate:
+		rate := float64(step)
+		if preferRPM {
+			rate = rate / 60.0
+		}
+
+		op := fmt.Sprintf("count(%s)/%f", aggKey, rate)
+		query := fmt.Sprintf(queryTmpl, op, whereClause, groupBy, having, orderBy)
+		return query, nil
+	case
+		v3.AggregateOperatorRateSum,
+		v3.AggregateOperatorRateMax,
+		v3.AggregateOperatorRateAvg,
+		v3.AggregateOperatorRateMin:
+		rate := float64(step)
+		if preferRPM {
+			rate = rate / 60.0
+		}
+
+		op := fmt.Sprintf("%s(%s)/%f", logsV3.AggregateOperatorToSQLFunc[aggOp], aggKey, rate)
+		query := fmt.Sprintf(queryTmpl, op, whereClause, groupBy, having, orderBy)
+		return query, nil
+	case
+		v3.AggregateOperatorP05,
+		v3.AggregateOperatorP10,
+		v3.AggregateOperatorP20,
+		v3.AggregateOperatorP25,
+		v3.AggregateOperatorP50,
+		v3.AggregateOperatorP75,
+		v3.AggregateOperatorP90,
+		v3.AggregateOperatorP95,
+		v3.AggregateOperatorP99:
+		op := fmt.Sprintf("quantile(%v)(%s)", logsV3.AggregateOperatorToPercentile[aggOp], aggKey)
+		query := fmt.Sprintf(queryTmpl, op, whereClause, groupBy, having, orderBy)
+		return query, nil
+	case v3.AggregateOperatorAvg, v3.AggregateOperatorSum, v3.AggregateOperatorMin, v3.AggregateOperatorMax:
+		op := fmt.Sprintf("%s(%s)", logsV3.AggregateOperatorToSQLFunc[aggOp], aggKey)
+		query := fmt.Sprintf(queryTmpl, op, whereClause, groupBy, having, orderBy)
+		return query, nil
+	case v3.AggregateOperatorCount:
+		op := "toFloat64(count(*))"
+		query := fmt.Sprintf(queryTmpl, op, whereClause, groupBy, having, orderBy)
+		return query, nil
+	case v3.AggregateOperatorCountDistinct:
+		op := fmt.Sprintf("toFloat64(count(distinct(%s)))", aggKey)
+		query := fmt.Sprintf(queryTmpl, op, whereClause, groupBy, having, orderBy)
+		return query, nil
+	default:
+		return "", fmt.Errorf("unsupported aggregate operator")
+	}
+}
+
+func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.BuilderQuery, graphLimitQtype string, preferRPM bool) (string, error) {
+	// timerange will be sent in epoch millisecond
+	logsStart := utils.GetEpochNanoSecs(start)
+	logsEnd := utils.GetEpochNanoSecs(end)
+
+	// -1800 is added so that the bucket start considers all the fingerprints.
+	bucketStart := logsStart/NANOSECOND - 1800
+	bucketEnd := logsEnd / NANOSECOND
+
+	// timestamp filter; the bucket_start filter is added for the primary key
+	timeFilter := fmt.Sprintf("(timestamp >= %d AND timestamp <= %d) AND (ts_bucket_start >= %d AND ts_bucket_start <= %d)", logsStart, logsEnd, bucketStart, bucketEnd)
+
+	// build the where clause for main table
+	filterSubQuery, err := buildLogsTimeSeriesFilterQuery(mq.Filters, mq.GroupBy, mq.AggregateAttribute)
+	if err != nil {
+		return "", err
+	}
+	if filterSubQuery != "" {
+		filterSubQuery = " AND " + filterSubQuery
+	}
+
+	// build the where clause for resource table
+	resourceSubQuery, err := buildResourceSubQuery(bucketStart, bucketEnd, mq.Filters, mq.GroupBy, mq.AggregateAttribute, false)
+	if err != nil {
+		return "", err
+	}
+	// join both the filter clauses
+	if resourceSubQuery != "" {
+		filterSubQuery = filterSubQuery + " AND (resource_fingerprint GLOBAL IN " + resourceSubQuery + ")"
+	}
+
+	// get the select labels
+	selectLabels := getSelectLabels(mq.AggregateOperator, mq.GroupBy)
+
+	// get the order by clause
+	orderBy := orderByAttributeKeyTags(panelType, mq.OrderBy, mq.GroupBy)
+	if panelType != v3.PanelTypeList && orderBy != "" {
+		orderBy = " order by " + orderBy
+	}
+
+	// if noop create the query and return
+	if mq.AggregateOperator == v3.AggregateOperatorNoOp {
+		// with noop, any filter or an order by other than ts will use the new table
+		sqlSelect := constants.LogsSQLSelectV2
+		queryTmpl := sqlSelect + "from signoz_logs.%s where %s%s order by %s"
+		query := fmt.Sprintf(queryTmpl, DISTRIBUTED_LOGS_V2, timeFilter, filterSubQuery, orderBy)
+		return query, nil
+		// ---- NOOP ends here ----
+	}
+
+	// ---- FOR aggregation queries ----
+
+	// get the having conditions
+	having := logsV3.Having(mq.Having)
+	if having != "" {
+		having = " having " + having
+	}
+
+	// get the group by clause
+	groupBy := logsV3.GroupByAttributeKeyTags(panelType, graphLimitQtype, mq.GroupBy...)
+	if panelType != v3.PanelTypeList && groupBy != "" {
+		groupBy = " group by " + groupBy
+	}
+
+	// get the aggregation key
+	aggregationKey := ""
+	if mq.AggregateAttribute.Key != "" {
+		aggregationKey = getClickhouseKey(mq.AggregateAttribute)
+	}
+
+	// for limit queries, there are two queries formed
+	// in the second query we need to add the placeholder so that the first query can be placed
+	if graphLimitQtype == constants.SecondQueryGraphLimit {
+		filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", logsV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "#LIMIT_PLACEHOLDER)"
+	}
+
+	aggClause, err := generateAggregateClause(mq.AggregateOperator, aggregationKey, step, preferRPM, timeFilter, filterSubQuery, groupBy, having, orderBy)
+	if err != nil {
+		return "", err
+	}
+
+	var queryTmplPrefix string
+	if graphLimitQtype == constants.FirstQueryGraphLimit {
+		queryTmplPrefix = "SELECT"
+	} else if panelType == v3.PanelTypeTable {
+		queryTmplPrefix =
+			"SELECT"
+		// step or aggregate interval is the whole time period in case of table panel
+		step = (utils.GetEpochNanoSecs(end) - utils.GetEpochNanoSecs(start)) / NANOSECOND
+	} else if panelType == v3.PanelTypeGraph || panelType == v3.PanelTypeValue {
+		// Select the aggregate value for interval
+		queryTmplPrefix =
+			fmt.Sprintf("SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL %d SECOND) AS ts,", step)
+	}
+
+	query := queryTmplPrefix + selectLabels + aggClause
+
+	// for limit queries this is the first query,
+	// we don't need the aggregation value here as we are just concerned with the group by names
+	// for applying the limit
+	if graphLimitQtype == constants.FirstQueryGraphLimit {
+		query = "SELECT " + logsV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy) + " from (" + query + ")"
+	}
+	return query, nil
+}
+
+func buildLogsLiveTailQuery(mq *v3.BuilderQuery) (string, error) {
+	filterSubQuery, err := buildLogsTimeSeriesFilterQuery(mq.Filters, mq.GroupBy, v3.AttributeKey{})
+	if err != nil {
+		return "", err
+	}
+
+	// no values for bucket start and end
+	resourceSubQuery, err := buildResourceSubQuery(0, 0, mq.Filters, mq.GroupBy, mq.AggregateAttribute, true)
+	if err != nil {
+		return "", err
+	}
+	// join both the filter clauses
+	if resourceSubQuery != "" {
+		filterSubQuery = filterSubQuery + " AND (resource_fingerprint GLOBAL IN " + resourceSubQuery
+	}
+
+	// the reader will add the timestamp and id filters
+	switch mq.AggregateOperator {
+	case v3.AggregateOperatorNoOp:
+		query := constants.LogsSQLSelectV2 + "from signoz_logs." + DISTRIBUTED_LOGS_V2 + " where "
+		if len(filterSubQuery) > 0 {
+			query = query + filterSubQuery + " AND "
+		}
+
+		return query, nil
+	default:
+		return "", fmt.Errorf("unsupported aggregate operator in live tail")
+	}
+}
+
+// PrepareLogsQuery prepares the query for logs
+func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.LogQBOptions) (string, error) {
+
+	// adjust the start and end time to the step interval
+	// NOTE: Disabling this as it's creating confusion between charts and actual data
+	// if panelType != v3.PanelTypeList {
+	// 	start = start - (start % (mq.StepInterval * 1000))
+	// 	end = end - (end % (mq.StepInterval * 1000))
+	// }
+
+	if options.IsLivetailQuery {
+		query, err := buildLogsLiveTailQuery(mq)
+		if err != nil {
+			return "", err
+		}
+		return query, nil
+	} else if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
+		// give me just the group_by names (no values)
+		query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, options.GraphLimitQtype, options.PreferRPM)
+		if err != nil {
+			return "", err
+		}
+		query = logsV3.AddLimitToQuery(query, mq.Limit)
+
+		return query, nil
+	} else if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
+		query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, options.GraphLimitQtype, options.PreferRPM)
+		if err != nil {
+			return "", err
+		}
+		return query, nil
+	}
+
+	query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, options.GraphLimitQtype, options.PreferRPM)
+	if err != nil {
+		return "", err
+	}
+	if panelType == v3.PanelTypeValue {
+		query, err = logsV3.ReduceQuery(query, mq.ReduceTo, mq.AggregateOperator)
+	}
+
+	if panelType == v3.PanelTypeList {
+		// check if limit exceeded
+		if mq.Limit > 0 && mq.Offset >= mq.Limit {
+			return "", fmt.Errorf("max limit exceeded")
+		}
+
+		if mq.PageSize > 0 {
+			if mq.Limit > 0 && mq.Offset+mq.PageSize > mq.Limit {
+				query = logsV3.AddLimitToQuery(query, mq.Limit-mq.Offset)
+			} else {
+				query = logsV3.AddLimitToQuery(query, mq.PageSize)
+			}
+
+			// add offset to the query only if it is not ordered by timestamp.
+			if !logsV3.IsOrderByTs(mq.OrderBy) {
+				query = logsV3.AddOffsetToQuery(query, mq.Offset)
+			}
+
+		} else {
+			query = logsV3.AddLimitToQuery(query, mq.Limit)
+		}
+	} else if panelType == v3.PanelTypeTable {
+		query = logsV3.AddLimitToQuery(query, mq.Limit)
+	}
+
+	return query, err
+}
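PrepareLogsQuery above is the entry point the rest of the query service calls; everything else in this file hangs off it. A minimal sketch of driving it for a count-per-service time series — the field values are illustrative, and it assumes the module imports resolve as in the examples earlier:

    package main

    import (
        "fmt"

        logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4"
        v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
    )

    func main() {
        mq := &v3.BuilderQuery{
            QueryName:         "A",
            StepInterval:      60,
            AggregateOperator: v3.AggregateOperatorCount,
            Expression:        "A",
            GroupBy: []v3.AttributeKey{
                {Key: "service.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource},
            },
        }

        // start/end are epoch milliseconds; the builder converts them to ns
        query, err := logsV4.PrepareLogsQuery(
            1680066360726, 1680066458000,
            v3.QueryTypeBuilder, v3.PanelTypeGraph,
            mq,
            v3.LogQBOptions{PreferRPM: false},
        )
        if err != nil {
            panic(err)
        }
        fmt.Println(query)
    }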
args struct { + op v3.FilterOperator + item v3.FilterItem + } + tests := []struct { + name string + args args + want string + }{ + { + name: "exists", + args: args{ + op: v3.FilterOperatorExists, + item: v3.FilterItem{Key: v3.AttributeKey{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, + }, + want: "mapContains(attributes_string, 'user_name')", + }, + { + name: "not exists", + args: args{ + op: v3.FilterOperatorNotExists, + item: v3.FilterItem{Key: v3.AttributeKey{Key: "user_name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, + }, + want: "not mapContains(attributes_string, 'user_name')", + }, + { + name: "exists mat column", + args: args{ + op: v3.FilterOperatorExists, + item: v3.FilterItem{Key: v3.AttributeKey{Key: "bytes", DataType: v3.AttributeKeyDataTypeFloat64, Type: v3.AttributeKeyTypeTag, IsColumn: true}}, + }, + want: "`attribute_number_bytes_exists`=true", + }, + { + name: "exists top level column", + args: args{ + op: v3.FilterOperatorExists, + item: v3.FilterItem{Key: v3.AttributeKey{Key: "trace_id", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified}}, + }, + want: "", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getExistsNexistsFilter(tt.args.op, tt.args.item); got != tt.want { + t.Errorf("getExistsNexistsFilter() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_buildAttributeFilter(t *testing.T) { + type args struct { + item v3.FilterItem + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "build attribute filter", + args: args{ + item: v3.FilterItem{ + Key: v3.AttributeKey{ + Key: "service.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + Operator: v3.FilterOperatorEqual, + Value: "test", + }, + }, + want: "resources_string['service.name'] = 'test'", + wantErr: false, + }, + { + name: "test for value search across all attributes", + args: args{ + item: v3.FilterItem{ + Key: v3.AttributeKey{ + Key: "__attrs", + DataType: v3.AttributeKeyDataTypeString, + }, + Operator: v3.FilterOperatorContains, + Value: "test", + }, + }, + want: "has(mapValues(attributes_string), 'test')", + }, + { + name: "build attribute filter exists", + args: args{ + item: v3.FilterItem{ + Key: v3.AttributeKey{ + Key: "service.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + Operator: v3.FilterOperatorExists, + }, + }, + want: "mapContains(resources_string, 'service.name')", + wantErr: false, + }, + { + name: "build attribute filter regex", + args: args{ + item: v3.FilterItem{ + Key: v3.AttributeKey{ + Key: "service.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + Operator: v3.FilterOperatorRegex, + Value: "^test", + }, + }, + want: "match(resources_string['service.name'], '^test')", + }, + { + name: "build attribute filter contains", + args: args{ + item: v3.FilterItem{ + Key: v3.AttributeKey{ + Key: "service.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + Operator: v3.FilterOperatorContains, + Value: "test", + }, + }, + want: "resources_string['service.name'] LIKE '%test%'", + }, + { + name: "build attribute filter contains- body", + args: args{ + item: v3.FilterItem{ + Key: v3.AttributeKey{ + Key: "body", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + Operator: v3.FilterOperatorContains, + Value: "test", + }, + 
}, + want: "lower(body) LIKE lower('%test%')", + }, + { + name: "build attribute filter like", + args: args{ + item: v3.FilterItem{ + Key: v3.AttributeKey{ + Key: "service.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + Operator: v3.FilterOperatorLike, + Value: "test", + }, + }, + want: "resources_string['service.name'] LIKE 'test'", + }, + { + name: "build attribute filter like-body", + args: args{ + item: v3.FilterItem{ + Key: v3.AttributeKey{ + Key: "body", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + Operator: v3.FilterOperatorLike, + Value: "test", + }, + }, + want: "lower(body) LIKE lower('test')", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := buildAttributeFilter(tt.args.item) + if (err != nil) != tt.wantErr { + t.Errorf("buildAttributeFilter() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("buildAttributeFilter() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_buildLogsTimeSeriesFilterQuery(t *testing.T) { + type args struct { + fs *v3.FilterSet + groupBy []v3.AttributeKey + aggregateAttribute v3.AttributeKey + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "build logs time series filter query", + args: args{ + fs: &v3.FilterSet{ + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "service.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: v3.FilterOperatorEqual, + Value: "test", + }, + { + Key: v3.AttributeKey{ + Key: "method", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: v3.FilterOperatorEqual, + Value: "GET", + }, + }, + }, + }, + want: "attributes_string['service.name'] = 'test' AND mapContains(attributes_string, 'service.name') " + + "AND attributes_string['method'] = 'GET' AND mapContains(attributes_string, 'method')", + }, + { + name: "build logs time series filter query with group by and aggregate attribute", + args: args{ + fs: &v3.FilterSet{ + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "service.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: v3.FilterOperatorEqual, + Value: "test", + }, + }, + }, + groupBy: []v3.AttributeKey{ + { + Key: "user_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + }, + aggregateAttribute: v3.AttributeKey{ + Key: "test", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + }, + want: "attributes_string['service.name'] = 'test' AND mapContains(attributes_string, 'service.name') " + + "AND mapContains(attributes_string, 'user_name') AND mapContains(attributes_string, 'test')", + }, + { + name: "build logs time series filter query with multiple group by and aggregate attribute", + args: args{ + fs: &v3.FilterSet{ + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "service.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: v3.FilterOperatorEqual, + Value: "test", + }, + }, + }, + groupBy: []v3.AttributeKey{ + { + Key: "user_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + { + Key: "host", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + { + Key: "method", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: true, + }, + { + Key: 
"trace_id", + DataType: v3.AttributeKeyDataTypeString, + IsColumn: true, + }, + }, + aggregateAttribute: v3.AttributeKey{ + Key: "test", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + }, + want: "attributes_string['service.name'] = 'test' AND mapContains(attributes_string, 'service.name') " + + "AND mapContains(attributes_string, 'user_name') AND `attribute_string_method_exists`=true AND mapContains(attributes_string, 'test')", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := buildLogsTimeSeriesFilterQuery(tt.args.fs, tt.args.groupBy, tt.args.aggregateAttribute) + if (err != nil) != tt.wantErr { + t.Errorf("buildLogsTimeSeriesFilterQuery() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("buildLogsTimeSeriesFilterQuery() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_orderByAttributeKeyTags(t *testing.T) { + type args struct { + panelType v3.PanelType + items []v3.OrderBy + tags []v3.AttributeKey + } + tests := []struct { + name string + args args + want string + }{ + { + name: "Test 1", + args: args{ + panelType: v3.PanelTypeGraph, + items: []v3.OrderBy{ + { + ColumnName: "name", + Order: "asc", + }, + { + ColumnName: constants.SigNozOrderByValue, + Order: "desc", + }, + }, + tags: []v3.AttributeKey{ + {Key: "name"}, + }, + }, + want: "`name` asc,value desc", + }, + { + name: "Test Graph item not present in tag", + args: args{ + panelType: v3.PanelTypeGraph, + items: []v3.OrderBy{ + { + ColumnName: "name", + Order: "asc", + }, + { + ColumnName: "bytes", + Order: "asc", + }, + { + ColumnName: "method", + Order: "asc", + }, + }, + tags: []v3.AttributeKey{ + {Key: "name"}, + {Key: "bytes"}, + }, + }, + want: "`name` asc,`bytes` asc", + }, + { + name: "Test panel list", + args: args{ + panelType: v3.PanelTypeList, + items: []v3.OrderBy{ + { + ColumnName: "name", + Order: "asc", + }, + { + ColumnName: constants.SigNozOrderByValue, + Order: "asc", + }, + { + ColumnName: "bytes", + Order: "asc", + }, + }, + tags: []v3.AttributeKey{ + {Key: "name"}, + {Key: "bytes"}, + }, + }, + want: "`name` asc,value asc,`bytes` asc", + }, + { + name: "test 4", + args: args{ + panelType: v3.PanelTypeList, + items: []v3.OrderBy{ + { + ColumnName: "name", + Order: "asc", + }, + { + ColumnName: constants.SigNozOrderByValue, + Order: "asc", + }, + { + ColumnName: "response_time", + Order: "desc", + Key: "response_time", + Type: v3.AttributeKeyTypeTag, + DataType: v3.AttributeKeyDataTypeString, + }, + }, + tags: []v3.AttributeKey{ + {Key: "name"}, + {Key: "value"}, + }, + }, + want: "`name` asc,value asc,attributes_string['response_time'] desc", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := orderByAttributeKeyTags(tt.args.panelType, tt.args.items, tt.args.tags); got != tt.want { + t.Errorf("orderByAttributeKeyTags() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_generateAggregateClause(t *testing.T) { + type args struct { + op v3.AggregateOperator + aggKey string + step int64 + preferRPM bool + timeFilter string + whereClause string + groupBy string + having string + orderBy string + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "test rate", + args: args{ + op: v3.AggregateOperatorRate, + aggKey: "test", + step: 60, + preferRPM: false, + timeFilter: "(timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 
1680066458)", + whereClause: " AND attributes_string['service.name'] = 'test'", + groupBy: " group by `user_name`", + having: "", + orderBy: " order by `user_name` desc", + }, + want: " count(test)/60.000000 as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND " + + "(ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_string['service.name'] = 'test' " + + "group by `user_name` order by `user_name` desc", + }, + { + name: "test P10 with all args", + args: args{ + op: v3.AggregateOperatorRate, + aggKey: "test", + step: 60, + preferRPM: false, + timeFilter: "(timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458)", + whereClause: " AND attributes_string['service.name'] = 'test'", + groupBy: " group by `user_name`", + having: " having value > 10", + orderBy: " order by `user_name` desc", + }, + want: " count(test)/60.000000 as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND " + + "(ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_string['service.name'] = 'test' group by `user_name` having value > 10 order by " + + "`user_name` desc", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := generateAggregateClause(tt.args.op, tt.args.aggKey, tt.args.step, tt.args.preferRPM, tt.args.timeFilter, tt.args.whereClause, tt.args.groupBy, tt.args.having, tt.args.orderBy) + if (err != nil) != tt.wantErr { + t.Errorf("generateAggreagteClause() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("generateAggreagteClause() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_buildLogsQuery(t *testing.T) { + type args struct { + panelType v3.PanelType + start int64 + end int64 + step int64 + mq *v3.BuilderQuery + graphLimitQtype string + preferRPM bool + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "build logs query", + args: args{ + panelType: v3.PanelTypeTable, + start: 1680066360726210000, + end: 1680066458000000000, + step: 1000, + mq: &v3.BuilderQuery{ + AggregateOperator: v3.AggregateOperatorCount, + Filters: &v3.FilterSet{ + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "service.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: v3.FilterOperatorEqual, + Value: "test", + }, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "user_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + }, + OrderBy: []v3.OrderBy{ + { + ColumnName: "user_name", + Order: "desc", + }, + }, + }, + }, + want: "SELECT attributes_string['user_name'] as `user_name`, toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 " + + "where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) " + + "AND attributes_string['service.name'] = 'test' AND mapContains(attributes_string, 'service.name') AND mapContains(attributes_string, 'user_name') " + + "group by `user_name` order by `user_name` desc", + }, + { + name: "build logs query noop", + args: args{ + panelType: v3.PanelTypeList, + start: 1680066360726210000, + end: 1680066458000000000, + step: 1000, + mq: &v3.BuilderQuery{ + AggregateOperator: v3.AggregateOperatorNoOp, + 
Filters: &v3.FilterSet{ + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "service.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + Operator: v3.FilterOperatorEqual, + Value: "test", + }, + }, + }, + OrderBy: []v3.OrderBy{ + { + ColumnName: "timestamp", + Order: "desc", + }, + }, + }, + }, + want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, attributes_string, attributes_number, attributes_bool, resources_string " + + "from signoz_logs.distributed_logs_v2 where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) " + + "AND attributes_string['service.name'] = 'test' AND mapContains(attributes_string, 'service.name') order by timestamp desc", + }, + { + name: "build logs query with all args", + args: args{ + panelType: v3.PanelTypeGraph, + start: 1680066360726210000, + end: 1680066458000000000, + step: 60, + mq: &v3.BuilderQuery{ + AggregateOperator: v3.AggregateOperatorAvg, + AggregateAttribute: v3.AttributeKey{ + Key: "duration", + Type: v3.AttributeKeyTypeTag, + DataType: v3.AttributeKeyDataTypeFloat64, + }, + Filters: &v3.FilterSet{ + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "service.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + Operator: v3.FilterOperatorEqual, + Value: "test", + }, + { + Key: v3.AttributeKey{ + Key: "duration", + DataType: v3.AttributeKeyDataTypeFloat64, + Type: v3.AttributeKeyTypeTag, + }, + Operator: v3.FilterOperatorGreaterThan, + Value: 1000, + }, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "host", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeResource, + }, + }, + OrderBy: []v3.OrderBy{ + { + ColumnName: "host", + Order: "desc", + }, + }, + }, + }, + want: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, resources_string['host'] as `host`, avg(attributes_number['duration']) as value " + + "from signoz_logs.distributed_logs_v2 where (timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) " + + "AND attributes_number['duration'] > 1000.000000 AND mapContains(attributes_number, 'duration') AND mapContains(attributes_number, 'duration') AND " + + "(resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) " + + "AND simpleJSONExtractString(labels, 'service.name') = 'test' AND labels like '%service.name%test%' AND ( (simpleJSONHas(labels, 'host') AND labels like '%host%') ))) " + + "group by `host`,ts order by `host` desc", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := buildLogsQuery(tt.args.panelType, tt.args.start, tt.args.end, tt.args.step, tt.args.mq, tt.args.graphLimitQtype, tt.args.preferRPM) + if (err != nil) != tt.wantErr { + t.Errorf("buildLogsQuery() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("buildLogsQuery() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPrepareLogsQuery(t *testing.T) { + type args struct { + start int64 + end int64 + queryType v3.QueryType + panelType v3.PanelType + mq *v3.BuilderQuery + options v3.LogQBOptions + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: 
"TABLE: Test count with JSON Filter Array, groupBy, orderBy", + args: args{ + start: 1680066360726210000, + end: 1680066458000000000, + panelType: v3.PanelTypeTable, + mq: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorCount, + Expression: "A", + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "body.requestor_list[*]", + DataType: "array(string)", + IsJSON: true, + }, + Operator: "has", + Value: "index_service", + }, + }, + }, + GroupBy: []v3.AttributeKey{ + {Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + {Key: "host", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, + }, + OrderBy: []v3.OrderBy{ + {ColumnName: "name", Order: "DESC"}, + }, + }, + }, + want: "SELECT attributes_string['name'] as `name`, resources_string['host'] as `host`, toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where " + + "(timestamp >= 1680066360726210000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND lower(body) like lower('%requestor_list%') " + + "AND lower(body) like lower('%index_service%') AND has(JSONExtract(JSON_QUERY(body, '$.\"requestor_list\"[*]'), 'Array(String)'), 'index_service') AND mapContains(attributes_string, 'name') AND " + + "(resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) AND " + + "( (simpleJSONHas(labels, 'host') AND labels like '%host%') ))) group by `name`,`host` order by `name` DESC", + }, + { + name: "Test TS with limit- first", + args: args{ + start: 1680066360726, + end: 1680066458000, + queryType: v3.QueryTypeBuilder, + panelType: v3.PanelTypeGraph, + mq: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + AggregateOperator: v3.AggregateOperatorCountDistinct, + Expression: "A", + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="}, + {Key: v3.AttributeKey{Key: "service.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: "app", Operator: "="}, + }, + }, + Limit: 10, + GroupBy: []v3.AttributeKey{{Key: "user", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, + }, + options: v3.LogQBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: true}, + }, + want: "SELECT `user` from (SELECT attributes_string['user'] as `user`, toFloat64(count(distinct(attributes_string['name']))) as value from signoz_logs.distributed_logs_v2 " + + "where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_string['method'] = 'GET' " + + "AND mapContains(attributes_string, 'method') AND mapContains(attributes_string, 'user') AND mapContains(attributes_string, 'name') AND (resource_fingerprint GLOBAL IN " + + "(SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) AND simpleJSONExtractString(labels, 'service.name') = 'app' " + + "AND labels like '%service.name%app%')) group 
by `user` order by value DESC) LIMIT 10", + }, + { + name: "Test TS with limit- second", + args: args{ + start: 1680066360726, + end: 1680066458000, + queryType: v3.QueryTypeBuilder, + panelType: v3.PanelTypeGraph, + mq: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + AggregateAttribute: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, + AggregateOperator: v3.AggregateOperatorCountDistinct, + Expression: "A", + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="}, + {Key: v3.AttributeKey{Key: "service.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: "app", Operator: "="}, + }, + }, + GroupBy: []v3.AttributeKey{{Key: "user", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, + Limit: 2, + }, + options: v3.LogQBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit}, + }, + want: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 60 SECOND) AS ts, attributes_string['user'] as `user`, toFloat64(count(distinct(attributes_string['name']))) as value " + + "from signoz_logs.distributed_logs_v2 where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND " + + "attributes_string['method'] = 'GET' AND mapContains(attributes_string, 'method') AND mapContains(attributes_string, 'user') AND mapContains(attributes_string, 'name') AND " + + "(resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) AND " + + "simpleJSONExtractString(labels, 'service.name') = 'app' AND labels like '%service.name%app%')) AND (`user`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `user`,ts order by value DESC", + }, + { + name: "Live Tail Query", + args: args{ + start: 1680066360726, + end: 1680066458000, + queryType: v3.QueryTypeBuilder, + panelType: v3.PanelTypeList, + mq: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorNoOp, + Expression: "A", + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="}, + }, + }, + }, + options: v3.LogQBOptions{IsLivetailQuery: true}, + }, + want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, attributes_string, attributes_number, attributes_bool, resources_string " + + "from signoz_logs.distributed_logs_v2 where attributes_string['method'] = 'GET' AND mapContains(attributes_string, 'method') AND ", + }, + { + name: "Live Tail Query with resource attribute", + args: args{ + start: 1680066360726, + end: 1680066458000, + queryType: v3.QueryTypeBuilder, + panelType: v3.PanelTypeList, + mq: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorNoOp, + Expression: "A", + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: "GET", Operator: "="}, + {Key: v3.AttributeKey{Key: "service.name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeResource}, Value: "app", 
Operator: "contains"}, + }, + }, + }, + options: v3.LogQBOptions{IsLivetailQuery: true}, + }, + want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, attributes_string, attributes_number, attributes_bool, resources_string from " + + "signoz_logs.distributed_logs_v2 where attributes_string['method'] = 'GET' AND mapContains(attributes_string, 'method') AND " + + "(resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE simpleJSONExtractString(labels, 'service.name') LIKE '%app%' AND labels like '%service.name%app%' AND ", + }, + { + name: "Live Tail Query W/O filter", + args: args{ + start: 1680066360726, + end: 1680066458000, + queryType: v3.QueryTypeBuilder, + panelType: v3.PanelTypeList, + mq: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorNoOp, + Expression: "A", + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}, + }, + options: v3.LogQBOptions{IsLivetailQuery: true}, + }, + want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, attributes_string, attributes_number, attributes_bool, resources_string " + + "from signoz_logs.distributed_logs_v2 where ", + }, + { + name: "Table query with limit", + args: args{ + start: 1680066360726, + end: 1680066458000, + queryType: v3.QueryTypeBuilder, + panelType: v3.PanelTypeTable, + mq: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorCount, + Expression: "A", + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}, + Limit: 10, + }, + }, + want: "SELECT toFloat64(count(*)) as value from signoz_logs.distributed_logs_v2 where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) order by value DESC LIMIT 10", + }, + { + name: "Test limit less than pageSize - order by ts", + args: args{ + start: 1680066360726, + end: 1680066458000, + queryType: v3.QueryTypeBuilder, + panelType: v3.PanelTypeList, + mq: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorNoOp, + Expression: "A", + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}, + OrderBy: []v3.OrderBy{{ColumnName: constants.TIMESTAMP, Order: "desc", Key: constants.TIMESTAMP, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}}, + Limit: 1, + Offset: 0, + PageSize: 5, + }, + }, + want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, attributes_string, attributes_number, attributes_bool, resources_string from " + + "signoz_logs.distributed_logs_v2 where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) " + + "order by `timestamp` desc LIMIT 1", + }, + { + name: "Test limit greater than pageSize - order by ts", + args: args{ + start: 1680066360726, + end: 1680066458000, + queryType: v3.QueryTypeBuilder, + panelType: v3.PanelTypeList, + mq: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorNoOp, + Expression: "A", + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "id", Type: v3.AttributeKeyTypeUnspecified, DataType: v3.AttributeKeyDataTypeString, IsColumn: true}, Operator: v3.FilterOperatorLessThan, Value: 
"2TNh4vp2TpiWyLt3SzuadLJF2s4"}, + }}, + OrderBy: []v3.OrderBy{{ColumnName: constants.TIMESTAMP, Order: "desc", Key: constants.TIMESTAMP, DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeUnspecified, IsColumn: true}}, + Limit: 100, + Offset: 10, + PageSize: 10, + }, + }, + want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, attributes_string, attributes_number, attributes_bool, resources_string from " + + "signoz_logs.distributed_logs_v2 where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) " + + "AND id < '2TNh4vp2TpiWyLt3SzuadLJF2s4' order by `timestamp` desc LIMIT 10", + }, + { + name: "Test limit less than pageSize - order by custom", + args: args{ + start: 1680066360726, + end: 1680066458000, + queryType: v3.QueryTypeBuilder, + panelType: v3.PanelTypeList, + mq: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorNoOp, + Expression: "A", + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}, + OrderBy: []v3.OrderBy{{ColumnName: "method", Order: "desc", Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, + Limit: 1, + Offset: 0, + PageSize: 5, + }, + }, + want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, attributes_string, attributes_number, attributes_bool, resources_string from " + + "signoz_logs.distributed_logs_v2 where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) " + + "order by attributes_string['method'] desc LIMIT 1 OFFSET 0", + }, + { + name: "Test limit greater than pageSize - order by custom", + args: args{ + start: 1680066360726, + end: 1680066458000, + queryType: v3.QueryTypeBuilder, + panelType: v3.PanelTypeList, + mq: &v3.BuilderQuery{ + QueryName: "A", + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorNoOp, + Expression: "A", + Filters: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{ + {Key: v3.AttributeKey{Key: "id", Type: v3.AttributeKeyTypeUnspecified, DataType: v3.AttributeKeyDataTypeString, IsColumn: true}, Operator: v3.FilterOperatorLessThan, Value: "2TNh4vp2TpiWyLt3SzuadLJF2s4"}, + }}, + OrderBy: []v3.OrderBy{{ColumnName: "method", Order: "desc", Key: "method", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}}, + Limit: 100, + Offset: 50, + PageSize: 50, + }, + }, + want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, attributes_string, attributes_number, attributes_bool, resources_string from " + + "signoz_logs.distributed_logs_v2 where (timestamp >= 1680066360726000000 AND timestamp <= 1680066458000000000) AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND " + + "id < '2TNh4vp2TpiWyLt3SzuadLJF2s4' order by attributes_string['method'] desc LIMIT 50 OFFSET 50", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := PrepareLogsQuery(tt.args.start, tt.args.end, tt.args.queryType, tt.args.panelType, tt.args.mq, tt.args.options) + if (err != nil) != tt.wantErr { + t.Errorf("PrepareLogsQuery() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("PrepareLogsQuery() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/query-service/app/logs/v4/resource_query_builder.go 
b/pkg/query-service/app/logs/v4/resource_query_builder.go index 004c9269fb..12d6c1a36a 100644 --- a/pkg/query-service/app/logs/v4/resource_query_builder.go +++ b/pkg/query-service/app/logs/v4/resource_query_builder.go @@ -164,7 +164,7 @@ func buildResourceFiltersFromAggregateAttribute(aggregateAttribute v3.AttributeK return "" } -func buildResourceSubQuery(bucketStart, bucketEnd int64, fs *v3.FilterSet, groupBy []v3.AttributeKey, aggregateAttribute v3.AttributeKey) (string, error) { +func buildResourceSubQuery(bucketStart, bucketEnd int64, fs *v3.FilterSet, groupBy []v3.AttributeKey, aggregateAttribute v3.AttributeKey, isLiveTail bool) (string, error) { // BUILD THE WHERE CLAUSE var conditions []string @@ -193,9 +193,14 @@ func buildResourceSubQuery(bucketStart, bucketEnd int64, fs *v3.FilterSet, group conditionStr := strings.Join(conditions, " AND ") // BUILD THE FINAL QUERY - query := fmt.Sprintf("SELECT fingerprint FROM signoz_logs.%s WHERE (seen_at_ts_bucket_start >= %d) AND (seen_at_ts_bucket_start <= %d) AND ", DISTRIBUTED_LOGS_V2_RESOURCE, bucketStart, bucketEnd) - - query = "(" + query + conditionStr + ")" + var query string + if isLiveTail { + query = fmt.Sprintf("SELECT fingerprint FROM signoz_logs.%s WHERE ", DISTRIBUTED_LOGS_V2_RESOURCE) + query = "(" + query + conditionStr + } else { + query = fmt.Sprintf("SELECT fingerprint FROM signoz_logs.%s WHERE (seen_at_ts_bucket_start >= %d) AND (seen_at_ts_bucket_start <= %d) AND ", DISTRIBUTED_LOGS_V2_RESOURCE, bucketStart, bucketEnd) + query = "(" + query + conditionStr + ")" + } return query, nil } diff --git a/pkg/query-service/app/logs/v4/resource_query_builder_test.go b/pkg/query-service/app/logs/v4/resource_query_builder_test.go index 1616c29e08..130fd9e98c 100644 --- a/pkg/query-service/app/logs/v4/resource_query_builder_test.go +++ b/pkg/query-service/app/logs/v4/resource_query_builder_test.go @@ -469,7 +469,7 @@ func Test_buildResourceSubQuery(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := buildResourceSubQuery(tt.args.bucketStart, tt.args.bucketEnd, tt.args.fs, tt.args.groupBy, tt.args.aggregateAttribute) + got, err := buildResourceSubQuery(tt.args.bucketStart, tt.args.bucketEnd, tt.args.fs, tt.args.groupBy, tt.args.aggregateAttribute, false) if (err != nil) != tt.wantErr { t.Errorf("buildResourceSubQuery() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go index 70eda959dc..71a1e39032 100644 --- a/pkg/query-service/constants/constants.go +++ b/pkg/query-service/constants/constants.go @@ -316,6 +316,12 @@ const ( "CAST((attributes_float64_key, attributes_float64_value), 'Map(String, Float64)') as attributes_float64," + "CAST((attributes_bool_key, attributes_bool_value), 'Map(String, Bool)') as attributes_bool," + "CAST((resources_string_key, resources_string_value), 'Map(String, String)') as resources_string " + LogsSQLSelectV2 = "SELECT " + + "timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, " + + "attributes_string, " + + "attributes_number, " + + "attributes_bool, " + + "resources_string " TracesExplorerViewSQLSelectWithSubQuery = "WITH subQuery AS (SELECT distinct on (traceID) traceID, durationNano, " + "serviceName, name FROM %s.%s WHERE parentSpanID = '' AND %s %s ORDER BY durationNano DESC " TracesExplorerViewSQLSelectQuery = "SELECT subQuery.serviceName, subQuery.name, count() AS " + @@ -380,6 +386,12 @@ var StaticFieldsLogsV3 = 
map[string]v3.AttributeKey{ Type: v3.AttributeKeyTypeUnspecified, IsColumn: true, }, + "__attrs": { + Key: "__attrs", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeUnspecified, + IsColumn: true, + }, } const SigNozOrderByValue = "#SIGNOZ_VALUE" diff --git a/pkg/query-service/model/v3/v3.go b/pkg/query-service/model/v3/v3.go index 0128536ac2..c21d47229c 100644 --- a/pkg/query-service/model/v3/v3.go +++ b/pkg/query-service/model/v3/v3.go @@ -1290,3 +1290,9 @@ type URLShareableOptions struct { Format string `json:"format"` SelectColumns []AttributeKey `json:"selectColumns"` } + +type LogQBOptions struct { + GraphLimitQtype string + IsLivetailQuery bool + PreferRPM bool +} diff --git a/pkg/query-service/utils/format.go b/pkg/query-service/utils/format.go index c623d3e8e0..e9b7a0b7e3 100644 --- a/pkg/query-service/utils/format.go +++ b/pkg/query-service/utils/format.go @@ -272,6 +272,28 @@ func GetClickhouseColumnName(typeName string, dataType, field string) string { return colName } +func GetClickhouseColumnNameV2(typeName string, dataType, field string) string { + if typeName == string(v3.AttributeKeyTypeTag) { + typeName = constants.Attributes + } + + if typeName != string(v3.AttributeKeyTypeResource) { + typeName = typeName[:len(typeName)-1] + } + + dataType = strings.ToLower(dataType) + + if dataType == "int64" || dataType == "float64" { + dataType = "number" + } + + // if name contains . replace it with `$$` + field = strings.ReplaceAll(field, ".", "$$") + + colName := fmt.Sprintf("%s_%s_%s", strings.ToLower(typeName), dataType, field) + return colName +} + // GetEpochNanoSecs takes epoch and returns it in ns func GetEpochNanoSecs(epoch int64) int64 { temp := epoch From 10ebd0cad6edf27c6846f48a485d52644c7a4d0a Mon Sep 17 00:00:00 2001 From: Nityananda Gohain Date: Thu, 12 Sep 2024 10:58:07 +0530 Subject: [PATCH 06/79] feat: use new schema flag (#5930) --- ee/query-service/app/api/api.go | 3 ++ ee/query-service/app/db/reader.go | 3 +- ee/query-service/app/server.go | 13 +++++++-- ee/query-service/main.go | 3 ++ ee/query-service/rules/manager.go | 1 + .../app/clickhouseReader/reader.go | 7 ++++- pkg/query-service/app/http_handler.go | 28 +++++++++++------- pkg/query-service/app/querier/querier.go | 9 ++++-- pkg/query-service/app/querier/v2/querier.go | 9 ++++-- pkg/query-service/app/server.go | 29 +++++++++++-------- pkg/query-service/main.go | 3 ++ pkg/query-service/rules/manager.go | 12 ++++++++ pkg/query-service/rules/threshold_rule.go | 19 +++++++----- .../rules/threshold_rule_test.go | 20 ++++++------- .../tests/integration/test_utils.go | 1 + 15 files changed, 109 insertions(+), 51 deletions(-) diff --git a/ee/query-service/app/api/api.go b/ee/query-service/app/api/api.go index 66b462e167..bb36fdf479 100644 --- a/ee/query-service/app/api/api.go +++ b/ee/query-service/app/api/api.go @@ -39,6 +39,8 @@ type APIHandlerOptions struct { Gateway *httputil.ReverseProxy // Querier Influx Interval FluxInterval time.Duration + + UseLogsNewSchema bool } type APIHandler struct { @@ -63,6 +65,7 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) { LogsParsingPipelineController: opts.LogsParsingPipelineController, Cache: opts.Cache, FluxInterval: opts.FluxInterval, + UseLogsNewSchema: opts.UseLogsNewSchema, }) if err != nil { diff --git a/ee/query-service/app/db/reader.go b/ee/query-service/app/db/reader.go index b8326058ec..fcab1cb991 100644 --- a/ee/query-service/app/db/reader.go +++ b/ee/query-service/app/db/reader.go @@ -25,8 +25,9 @@ func NewDataConnector( 
maxOpenConns int, dialTimeout time.Duration, cluster string, + useLogsNewSchema bool, ) *ClickhouseReader { - ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster) + ch := basechr.NewReader(localDB, promConfigPath, lm, maxIdleConns, maxOpenConns, dialTimeout, cluster, useLogsNewSchema) return &ClickhouseReader{ conn: ch.GetConn(), appdb: localDB, diff --git a/ee/query-service/app/server.go b/ee/query-service/app/server.go index ee019e639a..9845ee670b 100644 --- a/ee/query-service/app/server.go +++ b/ee/query-service/app/server.go @@ -77,6 +77,7 @@ type ServerOptions struct { FluxInterval string Cluster string GatewayUrl string + UseLogsNewSchema bool } // Server runs HTTP api service @@ -154,6 +155,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { serverOptions.MaxOpenConns, serverOptions.DialTimeout, serverOptions.Cluster, + serverOptions.UseLogsNewSchema, ) go qb.Start(readerReady) reader = qb @@ -176,7 +178,9 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { localDB, reader, serverOptions.DisableRules, - lm) + lm, + serverOptions.UseLogsNewSchema, + ) if err != nil { return nil, err @@ -265,6 +269,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { Cache: c, FluxInterval: fluxInterval, Gateway: gatewayProxy, + UseLogsNewSchema: serverOptions.UseLogsNewSchema, } apiHandler, err := api.NewAPIHandler(apiOpts) @@ -728,7 +733,8 @@ func makeRulesManager( db *sqlx.DB, ch baseint.Reader, disableRules bool, - fm baseint.FeatureLookup) (*baserules.Manager, error) { + fm baseint.FeatureLookup, + useLogsNewSchema bool) (*baserules.Manager, error) { // create engine pqle, err := pqle.FromConfigPath(promConfigPath) @@ -756,7 +762,8 @@ func makeRulesManager( Reader: ch, EvalDelay: baseconst.GetEvalDelay(), - PrepareTaskFunc: rules.PrepareTaskFunc, + PrepareTaskFunc: rules.PrepareTaskFunc, + UseLogsNewSchema: useLogsNewSchema, } // create Manager diff --git a/ee/query-service/main.go b/ee/query-service/main.go index c5a03f4c0f..75a49500d0 100644 --- a/ee/query-service/main.go +++ b/ee/query-service/main.go @@ -87,6 +87,7 @@ func main() { var ruleRepoURL string var cluster string + var useLogsNewSchema bool var cacheConfigPath, fluxInterval string var enableQueryServiceLogOTLPExport bool var preferSpanMetrics bool @@ -96,6 +97,7 @@ func main() { var dialTimeout time.Duration var gatewayUrl string + flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs") flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)") flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)") flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)") @@ -134,6 +136,7 @@ func main() { FluxInterval: fluxInterval, Cluster: cluster, GatewayUrl: gatewayUrl, + UseLogsNewSchema: useLogsNewSchema, } // Read the jwt secret key diff --git a/ee/query-service/rules/manager.go b/ee/query-service/rules/manager.go index d3bc03f58a..2b80441f0c 100644 --- a/ee/query-service/rules/manager.go +++ b/ee/query-service/rules/manager.go @@ -20,6 +20,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) opts.Rule, opts.FF, opts.Reader, + opts.UseLogsNewSchema, baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay), ) diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index 2984fa0fa5..b3ef773da0 100644 --- 
a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -132,6 +132,8 @@ type ClickHouseReader struct { liveTailRefreshSeconds int cluster string + + useLogsNewSchema bool } // NewTraceReader returns a TraceReader for the database @@ -143,6 +145,7 @@ func NewReader( maxOpenConns int, dialTimeout time.Duration, cluster string, + useLogsNewSchema bool, ) *ClickHouseReader { datasource := os.Getenv("ClickHouseUrl") @@ -153,7 +156,7 @@ func NewReader( zap.L().Fatal("failed to initialize ClickHouse", zap.Error(err)) } - return NewReaderFromClickhouseConnection(db, options, localDB, configFile, featureFlag, cluster) + return NewReaderFromClickhouseConnection(db, options, localDB, configFile, featureFlag, cluster, useLogsNewSchema) } func NewReaderFromClickhouseConnection( @@ -163,6 +166,7 @@ func NewReaderFromClickhouseConnection( configFile string, featureFlag interfaces.FeatureLookup, cluster string, + useLogsNewSchema bool, ) *ClickHouseReader { alertManager, err := am.New("") if err != nil { @@ -219,6 +223,7 @@ func NewReaderFromClickhouseConnection( featureFlags: featureFlag, cluster: cluster, queryProgressTracker: queryprogress.NewQueryProgressTracker(), + useLogsNewSchema: useLogsNewSchema, } } diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index 957ea5aaff..d347bf576e 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -105,6 +105,8 @@ type APIHandler struct { // Websocket connection upgrader Upgrader *websocket.Upgrader + + UseLogsNewSchema bool } type APIHandlerOpts struct { @@ -140,6 +142,9 @@ type APIHandlerOpts struct { // Querier Influx Interval FluxInterval time.Duration + + // Use new schema + UseLogsNewSchema bool } // NewAPIHandler returns an APIHandler @@ -151,19 +156,21 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) { } querierOpts := querier.QuerierOptions{ - Reader: opts.Reader, - Cache: opts.Cache, - KeyGenerator: queryBuilder.NewKeyGenerator(), - FluxInterval: opts.FluxInterval, - FeatureLookup: opts.FeatureFlags, + Reader: opts.Reader, + Cache: opts.Cache, + KeyGenerator: queryBuilder.NewKeyGenerator(), + FluxInterval: opts.FluxInterval, + FeatureLookup: opts.FeatureFlags, + UseLogsNewSchema: opts.UseLogsNewSchema, } querierOptsV2 := querierV2.QuerierOptions{ - Reader: opts.Reader, - Cache: opts.Cache, - KeyGenerator: queryBuilder.NewKeyGenerator(), - FluxInterval: opts.FluxInterval, - FeatureLookup: opts.FeatureFlags, + Reader: opts.Reader, + Cache: opts.Cache, + KeyGenerator: queryBuilder.NewKeyGenerator(), + FluxInterval: opts.FluxInterval, + FeatureLookup: opts.FeatureFlags, + UseLogsNewSchema: opts.UseLogsNewSchema, } querier := querier.NewQuerier(querierOpts) @@ -185,6 +192,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) { LogsParsingPipelineController: opts.LogsParsingPipelineController, querier: querier, querierV2: querierv2, + UseLogsNewSchema: opts.UseLogsNewSchema, } builderOpts := queryBuilder.QueryBuilderOptions{ diff --git a/pkg/query-service/app/querier/querier.go b/pkg/query-service/app/querier/querier.go index 86a77da114..0663afd126 100644 --- a/pkg/query-service/app/querier/querier.go +++ b/pkg/query-service/app/querier/querier.go @@ -54,6 +54,8 @@ type querier struct { timeRanges [][]int returnedSeries []*v3.Series returnedErr error + + UseLogsNewSchema bool } type QuerierOptions struct { @@ -64,9 +66,10 @@ type QuerierOptions struct { FeatureLookup interfaces.FeatureLookup // 
used for testing - TestingMode bool - ReturnedSeries []*v3.Series - ReturnedErr error + TestingMode bool + ReturnedSeries []*v3.Series + ReturnedErr error + UseLogsNewSchema bool } func NewQuerier(opts QuerierOptions) interfaces.Querier { diff --git a/pkg/query-service/app/querier/v2/querier.go b/pkg/query-service/app/querier/v2/querier.go index d0c3a77d13..01cbf6d649 100644 --- a/pkg/query-service/app/querier/v2/querier.go +++ b/pkg/query-service/app/querier/v2/querier.go @@ -54,6 +54,8 @@ type querier struct { timeRanges [][]int returnedSeries []*v3.Series returnedErr error + + UseLogsNewSchema bool } type QuerierOptions struct { @@ -64,9 +66,10 @@ type QuerierOptions struct { FeatureLookup interfaces.FeatureLookup // used for testing - TestingMode bool - ReturnedSeries []*v3.Series - ReturnedErr error + TestingMode bool + ReturnedSeries []*v3.Series + ReturnedErr error + UseLogsNewSchema bool } func NewQuerier(opts QuerierOptions) interfaces.Querier { diff --git a/pkg/query-service/app/server.go b/pkg/query-service/app/server.go index 557b082f42..a1fc0dd329 100644 --- a/pkg/query-service/app/server.go +++ b/pkg/query-service/app/server.go @@ -66,6 +66,7 @@ type ServerOptions struct { CacheConfigPath string FluxInterval string Cluster string + UseLogsNewSchema bool } // Server runs HTTP, Mux and a grpc server @@ -128,6 +129,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { serverOptions.MaxOpenConns, serverOptions.DialTimeout, serverOptions.Cluster, + serverOptions.UseLogsNewSchema, ) go clickhouseReader.Start(readerReady) reader = clickhouseReader @@ -144,7 +146,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { } <-readerReady - rm, err := makeRulesManager(serverOptions.PromConfigPath, constants.GetAlertManagerApiPrefix(), serverOptions.RuleRepoURL, localDB, reader, serverOptions.DisableRules, fm) + rm, err := makeRulesManager(serverOptions.PromConfigPath, constants.GetAlertManagerApiPrefix(), serverOptions.RuleRepoURL, localDB, reader, serverOptions.DisableRules, fm, serverOptions.UseLogsNewSchema) if err != nil { return nil, err } @@ -197,6 +199,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { LogsParsingPipelineController: logParsingPipelineController, Cache: c, FluxInterval: fluxInterval, + UseLogsNewSchema: serverOptions.UseLogsNewSchema, }) if err != nil { return nil, err @@ -713,7 +716,8 @@ func makeRulesManager( db *sqlx.DB, ch interfaces.Reader, disableRules bool, - fm interfaces.FeatureLookup) (*rules.Manager, error) { + fm interfaces.FeatureLookup, + useLogsNewSchema bool) (*rules.Manager, error) { // create engine pqle, err := pqle.FromReader(ch) @@ -730,16 +734,17 @@ func makeRulesManager( // create manager opts managerOpts := &rules.ManagerOptions{ - NotifierOpts: notifierOpts, - PqlEngine: pqle, - RepoURL: ruleRepoURL, - DBConn: db, - Context: context.Background(), - Logger: nil, - DisableRules: disableRules, - FeatureFlags: fm, - Reader: ch, - EvalDelay: constants.GetEvalDelay(), + NotifierOpts: notifierOpts, + PqlEngine: pqle, + RepoURL: ruleRepoURL, + DBConn: db, + Context: context.Background(), + Logger: nil, + DisableRules: disableRules, + FeatureFlags: fm, + Reader: ch, + EvalDelay: constants.GetEvalDelay(), + UseLogsNewSchema: useLogsNewSchema, } // create Manager diff --git a/pkg/query-service/main.go b/pkg/query-service/main.go index 3063e07b12..d1b191f248 100644 --- a/pkg/query-service/main.go +++ b/pkg/query-service/main.go @@ -33,6 +33,7 @@ func main() { // disables rule execution but allows change to 
the rule definition var disableRules bool + var useLogsNewSchema bool // the url used to build link in the alert messages in slack and other systems var ruleRepoURL, cacheConfigPath, fluxInterval string var cluster string @@ -43,6 +44,7 @@ func main() { var maxOpenConns int var dialTimeout time.Duration + flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs") flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)") flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)") flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)") @@ -79,6 +81,7 @@ func main() { CacheConfigPath: cacheConfigPath, FluxInterval: fluxInterval, Cluster: cluster, + UseLogsNewSchema: useLogsNewSchema, } // Read the jwt secret key diff --git a/pkg/query-service/rules/manager.go b/pkg/query-service/rules/manager.go index 120d674a9a..eaabc4f27a 100644 --- a/pkg/query-service/rules/manager.go +++ b/pkg/query-service/rules/manager.go @@ -35,6 +35,8 @@ type PrepareTaskOptions struct { FF interfaces.FeatureLookup ManagerOpts *ManagerOptions NotifyFunc NotifyFunc + + UseLogsNewSchema bool } const taskNamesuffix = "webAppEditor" @@ -75,6 +77,8 @@ type ManagerOptions struct { EvalDelay time.Duration PrepareTaskFunc func(opts PrepareTaskOptions) (Task, error) + + UseLogsNewSchema bool } // The Manager manages recording and alerting rules. @@ -96,6 +100,8 @@ type Manager struct { reader interfaces.Reader prepareTaskFunc func(opts PrepareTaskOptions) (Task, error) + + UseLogsNewSchema bool } func defaultOptions(o *ManagerOptions) *ManagerOptions { @@ -130,6 +136,7 @@ func defaultPrepareTaskFunc(opts PrepareTaskOptions) (Task, error) { opts.Rule, opts.FF, opts.Reader, + opts.UseLogsNewSchema, WithEvalDelay(opts.ManagerOpts.EvalDelay), ) @@ -333,6 +340,8 @@ func (m *Manager) editTask(rule *PostableRule, taskName string) error { FF: m.featureFlags, ManagerOpts: m.opts, NotifyFunc: m.prepareNotifyFunc(), + + UseLogsNewSchema: m.opts.UseLogsNewSchema, }) if err != nil { @@ -452,6 +461,8 @@ func (m *Manager) addTask(rule *PostableRule, taskName string) error { FF: m.featureFlags, ManagerOpts: m.opts, NotifyFunc: m.prepareNotifyFunc(), + + UseLogsNewSchema: m.opts.UseLogsNewSchema, }) for _, r := range newTask.Rules() { @@ -794,6 +805,7 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m parsedRule, m.featureFlags, m.reader, + m.opts.UseLogsNewSchema, WithSendAlways(), WithSendUnmatched(), ) diff --git a/pkg/query-service/rules/threshold_rule.go b/pkg/query-service/rules/threshold_rule.go index d35798035e..964774500e 100644 --- a/pkg/query-service/rules/threshold_rule.go +++ b/pkg/query-service/rules/threshold_rule.go @@ -60,6 +60,7 @@ func NewThresholdRule( p *PostableRule, featureFlags interfaces.FeatureLookup, reader interfaces.Reader, + useLogsNewSchema bool, opts ...RuleOption, ) (*ThresholdRule, error) { @@ -77,17 +78,19 @@ func NewThresholdRule( } querierOption := querier.QuerierOptions{ - Reader: reader, - Cache: nil, - KeyGenerator: queryBuilder.NewKeyGenerator(), - FeatureLookup: featureFlags, + Reader: reader, + Cache: nil, + KeyGenerator: queryBuilder.NewKeyGenerator(), + FeatureLookup: featureFlags, + UseLogsNewSchema: useLogsNewSchema, } querierOptsV2 := querierV2.QuerierOptions{ - Reader: reader, - Cache: nil, - KeyGenerator: queryBuilder.NewKeyGenerator(), - FeatureLookup: featureFlags, + Reader: reader, + Cache: nil, + 
KeyGenerator: queryBuilder.NewKeyGenerator(), + FeatureLookup: featureFlags, + UseLogsNewSchema: useLogsNewSchema, } t.querier = querier.NewQuerier(querierOption) diff --git a/pkg/query-service/rules/threshold_rule_test.go b/pkg/query-service/rules/threshold_rule_test.go index 734347793d..ab37ad6af1 100644 --- a/pkg/query-service/rules/threshold_rule_test.go +++ b/pkg/query-service/rules/threshold_rule_test.go @@ -685,7 +685,7 @@ func TestThresholdRuleShouldAlert(t *testing.T) { postableRule.RuleCondition.MatchType = MatchType(c.matchType) postableRule.RuleCondition.Target = &c.target - rule, err := NewThresholdRule("69", &postableRule, fm, nil, WithEvalDelay(2*time.Minute)) + rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute)) if err != nil { assert.NoError(t, err) } @@ -774,7 +774,7 @@ func TestPrepareLinksToLogs(t *testing.T) { } fm := featureManager.StartManager() - rule, err := NewThresholdRule("69", &postableRule, fm, nil, WithEvalDelay(2*time.Minute)) + rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute)) if err != nil { assert.NoError(t, err) } @@ -816,7 +816,7 @@ func TestPrepareLinksToTraces(t *testing.T) { } fm := featureManager.StartManager() - rule, err := NewThresholdRule("69", &postableRule, fm, nil, WithEvalDelay(2*time.Minute)) + rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute)) if err != nil { assert.NoError(t, err) } @@ -892,7 +892,7 @@ func TestThresholdRuleLabelNormalization(t *testing.T) { postableRule.RuleCondition.MatchType = MatchType(c.matchType) postableRule.RuleCondition.Target = &c.target - rule, err := NewThresholdRule("69", &postableRule, fm, nil, WithEvalDelay(2*time.Minute)) + rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute)) if err != nil { assert.NoError(t, err) } @@ -945,7 +945,7 @@ func TestThresholdRuleEvalDelay(t *testing.T) { fm := featureManager.StartManager() for idx, c := range cases { - rule, err := NewThresholdRule("69", &postableRule, fm, nil) // no eval delay + rule, err := NewThresholdRule("69", &postableRule, fm, nil, true) // no eval delay if err != nil { assert.NoError(t, err) } @@ -994,7 +994,7 @@ func TestThresholdRuleClickHouseTmpl(t *testing.T) { fm := featureManager.StartManager() for idx, c := range cases { - rule, err := NewThresholdRule("69", &postableRule, fm, nil, WithEvalDelay(2*time.Minute)) + rule, err := NewThresholdRule("69", &postableRule, fm, nil, true, WithEvalDelay(2*time.Minute)) if err != nil { assert.NoError(t, err) } @@ -1135,9 +1135,9 @@ func TestThresholdRuleUnitCombinations(t *testing.T) { } options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace") - reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "") + reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true) - rule, err := NewThresholdRule("69", &postableRule, fm, reader) + rule, err := NewThresholdRule("69", &postableRule, fm, reader, true) rule.temporalityMap = map[string]map[v3.Temporality]bool{ "signoz_calls_total": { v3.Delta: true, @@ -1234,9 +1234,9 @@ func TestThresholdRuleNoData(t *testing.T) { } options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace") - reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "") + reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true) - rule, err := 
NewThresholdRule("69", &postableRule, fm, reader) + rule, err := NewThresholdRule("69", &postableRule, fm, reader, true) rule.temporalityMap = map[string]map[v3.Temporality]bool{ "signoz_calls_total": { v3.Delta: true, diff --git a/pkg/query-service/tests/integration/test_utils.go b/pkg/query-service/tests/integration/test_utils.go index 65140e5fc8..d060433dba 100644 --- a/pkg/query-service/tests/integration/test_utils.go +++ b/pkg/query-service/tests/integration/test_utils.go @@ -45,6 +45,7 @@ func NewMockClickhouseReader( "", featureFlags, "", + true, ) return reader, mockDB From 381a4de88a7d4b33e63e6586770a7fdf5293a85e Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Thu, 12 Sep 2024 12:48:50 +0530 Subject: [PATCH 07/79] chore: use json formatting for ClickHouse logs (#5241) Co-authored-by: Prashant Shahi --- deploy/docker-swarm/clickhouse-setup/clickhouse-config.xml | 3 +++ deploy/docker/clickhouse-setup/clickhouse-config.xml | 3 +++ 2 files changed, 6 insertions(+) diff --git a/deploy/docker-swarm/clickhouse-setup/clickhouse-config.xml b/deploy/docker-swarm/clickhouse-setup/clickhouse-config.xml index 4e8dc00b30..f285997166 100644 --- a/deploy/docker-swarm/clickhouse-setup/clickhouse-config.xml +++ b/deploy/docker-swarm/clickhouse-setup/clickhouse-config.xml @@ -23,6 +23,9 @@ [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114 --> information + + json + /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.err.log information + + json + /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.err.log -*Use this template to request a new dashboard for the SigNoz Dashboards repository. Please provide as much detail as possible to help us understand your needs.* +## Dashboard Name ---- - -### 1. Dashboard Name + -Name of the requested dashboard (e.g., MySQL Monitoring Dashboard): +## Expected Dashboard Sections and Panels ---- +(Can be tweaked (add or remove panels/sections) according to available metrics) -### 2. Expected Dashboard Sections and Panels +### Section Name -#### Section Name + -Brief description of the section (e.g., "Resource usage metrics for MySQL database"). +### Panel Name -#### Panel Name + -Panel description (e.g., "Value-type panels displaying current CPU usage, memory usage, etc."). - -- **Example:** + ---- + -### 3. Expected Variables +## Expected Dashboard Variables -List any variables you expect to use in the dashboard (e.g., `deployment.environment`, `hostname`, etc.). + ---- +## Additional Comments or Requirements -### 4. Additional Comments or Requirements + -Any additional details or special requirements for the dashboard? +## References or Screenshots ---- + -### 📋 Notes +## 📋 Notes Please review the [CONTRIBUTING.md](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md) for guidelines on dashboard structure, naming conventions, and how to submit a pull request. - ---- -Thank you for your request! We will review it and provide feedback or guidance as necessary. 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cc1c4399d8..632a4e98e8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -30,6 +30,7 @@ Also, have a look at these [good first issues label](https://github.com/SigNoz/s - [To run ClickHouse setup](#41-to-run-clickhouse-setup-recommended-for-local-development) - [Contribute to SigNoz Helm Chart](#5-contribute-to-signoz-helm-chart-) - [To run helm chart for local development](#51-to-run-helm-chart-for-local-development) +- [Contribute to Dashboards](#6-contribute-to-dashboards-) - [Other Ways to Contribute](#other-ways-to-contribute) # 1. General Instructions 📝 @@ -369,6 +370,17 @@ curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod- --- +# 6. Contribute to Dashboards 📈 + +**Need to Update: [https://github.com/SigNoz/dashboards](https://github.com/SigNoz/dashboards)** + +To contribute a new dashboard template for any service, follow the contribution guidelines in the [Dashboard Contributing Guide](https://github.com/SigNoz/dashboards/blob/main/CONTRIBUTING.md). In brief: + +1. Create a dashboard JSON file. +2. Add a README file explaining the dashboard, the metrics ingested, and the configurations needed. +3. Include screenshots of the dashboard in the `assets/` directory. +4. Submit a pull request for review. + ## Other Ways to Contribute There are many other ways to get involved with the community and to participate in this project: @@ -379,7 +391,6 @@ There are many other ways to get involved with the community and to participate - Help answer questions on forums such as Stack Overflow and [SigNoz Community Slack Channel](https://signoz.io/slack). - Tell others about the project on Twitter, your blog, etc. - Again, Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :) Thank You! 
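For step 1 of the dashboards guide above, the contribution itself is a JSON export of a working SigNoz dashboard. A rough sketch of sanity-checking such a file before opening the pull request; the file name is hypothetical and the field names are illustrative assumptions, since the authoritative schema is whatever the SigNoz/dashboards repository accepts:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// dashboardTemplate is an illustrative subset of a dashboard JSON export;
// the field names are assumptions for this sketch, not a documented contract.
type dashboardTemplate struct {
	Title       string           `json:"title"`
	Description string           `json:"description"`
	Tags        []string         `json:"tags"`
	Variables   map[string]any   `json:"variables"`
	Widgets     []map[string]any `json:"widgets"`
}

func main() {
	raw, err := os.ReadFile("mysql-monitoring.json") // hypothetical template file
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	var d dashboardTemplate
	if err := json.Unmarshal(raw, &d); err != nil {
		fmt.Fprintln(os.Stderr, "not valid dashboard JSON:", err)
		os.Exit(1)
	}
	fmt.Printf("%q: %d widgets, tags %v\n", d.Title, len(d.Widgets), d.Tags)
}
```

A quick parse like this catches malformed JSON before review, which is cheaper than catching it in the pull request.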
From 70fb5af19f0c5cad25670f4993436336d88b9a4e Mon Sep 17 00:00:00 2001 From: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com> Date: Thu, 19 Sep 2024 19:18:37 +0530 Subject: [PATCH 44/79] chore: removed empty signoz-core-ui folder (#6030) --- signoz-core-ui | 1 - 1 file changed, 1 deletion(-) delete mode 160000 signoz-core-ui diff --git a/signoz-core-ui b/signoz-core-ui deleted file mode 160000 index f8c925d842..0000000000 --- a/signoz-core-ui +++ /dev/null @@ -1 +0,0 @@ -Subproject commit f8c925d842922f8a30063012a7bfb688a3bf0f36 From 2f7d208eb56d36f4ab02082a74931d194cecde5b Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Thu, 19 Sep 2024 19:23:12 +0530 Subject: [PATCH 45/79] fix: added time range key for query and local storage handling (#6009) * fix: added time range key for query and local storage handling * chore: fix jest test cases * fix: send single element array for only variable option as well * fix: intermediate stale data should not be shown --- .../GridCardLayout/GridCard/index.tsx | 21 ++++++++++ .../VariableItem.test.tsx | 8 +--- .../VariableItem.tsx | 40 +++++++++++++++++-- 3 files changed, 58 insertions(+), 11 deletions(-) diff --git a/frontend/src/container/GridCardLayout/GridCard/index.tsx b/frontend/src/container/GridCardLayout/GridCard/index.tsx index a618f807a5..66ce70fb86 100644 --- a/frontend/src/container/GridCardLayout/GridCard/index.tsx +++ b/frontend/src/container/GridCardLayout/GridCard/index.tsx @@ -11,6 +11,7 @@ import { isEqual } from 'lodash-es'; import isEmpty from 'lodash-es/isEmpty'; import { useDashboard } from 'providers/Dashboard/Dashboard'; import { memo, useEffect, useRef, useState } from 'react'; +import { useQueryClient } from 'react-query'; import { useDispatch, useSelector } from 'react-redux'; import { UpdateTimeInterval } from 'store/actions'; import { AppState } from 'store/reducers'; @@ -48,6 +49,7 @@ function GridCardGraph({ AppState, GlobalReducer >((state) => state.globalTime); + const queryClient = useQueryClient(); const handleBackNavigation = (): void => { const searchParams = new URLSearchParams(window.location.search); @@ -136,6 +138,25 @@ function GridCardGraph({ }; }); + // TODO [vikrantgupta25] remove this useEffect with refactor as this is prone to race condition + // this is added to tackle the case of async communication between VariableItem.tsx and GridCard.tsx + useEffect(() => { + if (variablesToGetUpdated.length > 0) { + queryClient.cancelQueries([ + maxTime, + minTime, + globalSelectedInterval, + variables, + widget?.query, + widget?.panelTypes, + widget.timePreferance, + widget.fillSpans, + requestData, + ]); + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [variablesToGetUpdated]); + useEffect(() => { if (!isEqual(updatedQuery, requestData.query)) { setRequestData((prev) => ({ diff --git a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.test.tsx b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.test.tsx index 0c8fbd51ae..1cb89d6b95 100644 --- a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.test.tsx +++ b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.test.tsx @@ -1,14 +1,8 @@ import '@testing-library/jest-dom/extend-expect'; -import { - act, - fireEvent, - render, - screen, - waitFor, -} from '@testing-library/react'; import MockQueryClientProvider from 'providers/test/MockQueryClientProvider'; import React, { useEffect } from 'react'; +import { act, fireEvent, render, 
screen, waitFor } from 'tests/test-utils'; import { IDashboardVariable } from 'types/api/dashboard/getAll'; import VariableItem from './VariableItem'; diff --git a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx index baa8228b3c..e14162d0ce 100644 --- a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx +++ b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx @@ -1,3 +1,4 @@ +/* eslint-disable sonarjs/cognitive-complexity */ /* eslint-disable jsx-a11y/click-events-have-key-events */ /* eslint-disable jsx-a11y/no-static-element-interactions */ /* eslint-disable @typescript-eslint/no-explicit-any */ @@ -25,8 +26,11 @@ import { debounce, isArray, isString } from 'lodash-es'; import map from 'lodash-es/map'; import { ChangeEvent, memo, useEffect, useMemo, useState } from 'react'; import { useQuery } from 'react-query'; +import { useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; import { IDashboardVariable } from 'types/api/dashboard/getAll'; import { VariableResponseProps } from 'types/api/dashboard/variables/query'; +import { GlobalReducer } from 'types/reducer/globalTime'; import { popupContainer } from 'utils/selectPopupContainer'; import { variablePropsToPayloadVariables } from '../utils'; @@ -80,6 +84,23 @@ function VariableItem({ [], ); + const { maxTime, minTime } = useSelector( + (state) => state.globalTime, + ); + + useEffect(() => { + if (variableData.allSelected && variableData.type === 'QUERY') { + setVariablesToGetUpdated((prev) => { + const variablesQueue = [...prev.filter((v) => v !== variableData.name)]; + if (variableData.name) { + variablesQueue.push(variableData.name); + } + return variablesQueue; + }); + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [minTime, maxTime]); + const [errorMessage, setErrorMessage] = useState(null); const getDependentVariables = (queryValue: string): string[] => { @@ -111,7 +132,14 @@ function VariableItem({ const variableKey = dependentVariablesStr.replace(/\s/g, ''); - return [REACT_QUERY_KEY.DASHBOARD_BY_ID, variableName, variableKey]; + // added this time dependency for variables query as API respects the passed time range now + return [ + REACT_QUERY_KEY.DASHBOARD_BY_ID, + variableName, + variableKey, + `${minTime}`, + `${maxTime}`, + ]; }; // eslint-disable-next-line sonarjs/cognitive-complexity @@ -151,10 +179,14 @@ function VariableItem({ valueNotInList = true; } } + // variablesData.allSelected is added for the case where on change of options we need to update the + // local storage if ( variableData.type === 'QUERY' && variableData.name && - (variablesToGetUpdated.includes(variableData.name) || valueNotInList) + (variablesToGetUpdated.includes(variableData.name) || + valueNotInList || + variableData.allSelected) ) { let value = variableData.selectedValue; let allSelected = false; @@ -338,8 +370,8 @@ function VariableItem({ (Array.isArray(selectValue) && selectValue?.includes(option.toString())); if (isChecked) { - if (mode === ToggleTagValue.Only) { - handleChange(option.toString()); + if (mode === ToggleTagValue.Only && variableData.multiSelect) { + handleChange([option.toString()]); } else if (!variableData.multiSelect) { handleChange(option.toString()); } else { From 8eb2cf144e3e66e758621c772396a95e4e9b8220 Mon Sep 17 00:00:00 2001 From: Nityananda Gohain Date: Thu, 19 Sep 2024 21:20:57 +0530 Subject: [PATCH 46/79] 
fix: issues with like and ilike fixed in v4 qb (#6018) --- .../app/logs/v4/query_builder.go | 16 ++++++----- .../app/logs/v4/query_builder_test.go | 4 +-- .../app/logs/v4/resource_query_builder.go | 17 +++++++++--- .../logs/v4/resource_query_builder_test.go | 27 ++++++++++++++++--- 4 files changed, 48 insertions(+), 16 deletions(-) diff --git a/pkg/query-service/app/logs/v4/query_builder.go b/pkg/query-service/app/logs/v4/query_builder.go index 47fda73c2a..e906c605a1 100644 --- a/pkg/query-service/app/logs/v4/query_builder.go +++ b/pkg/query-service/app/logs/v4/query_builder.go @@ -146,6 +146,7 @@ func buildAttributeFilter(item v3.FilterItem) (string, error) { return fmt.Sprintf(logsOp, keyName, fmtVal), nil case v3.FilterOperatorContains, v3.FilterOperatorNotContains: + // we also want to treat %, _ as literals for contains val := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", item.Value)) // for body the contains is case insensitive if keyName == BODY { @@ -153,14 +154,15 @@ func buildAttributeFilter(item v3.FilterItem) (string, error) { } else { return fmt.Sprintf("%s %s '%%%s%%'", keyName, logsOp, val), nil } - default: - // for use lower for like and ilike - if op == v3.FilterOperatorLike || op == v3.FilterOperatorNotLike { - if keyName == BODY { - keyName = fmt.Sprintf("lower(%s)", keyName) - fmtVal = fmt.Sprintf("lower(%s)", fmtVal) - } + case v3.FilterOperatorLike, v3.FilterOperatorNotLike: + // for body use lower for like and ilike + val := utils.QuoteEscapedString(fmt.Sprintf("%s", item.Value)) + if keyName == BODY { + return fmt.Sprintf("lower(%s) %s lower('%s')", keyName, logsOp, val), nil + } else { + return fmt.Sprintf("%s %s '%s'", keyName, logsOp, val), nil } + default: return fmt.Sprintf("%s %s %s", keyName, logsOp, fmtVal), nil } } else { diff --git a/pkg/query-service/app/logs/v4/query_builder_test.go b/pkg/query-service/app/logs/v4/query_builder_test.go index 1b24a6aac6..34ea7e1f6f 100644 --- a/pkg/query-service/app/logs/v4/query_builder_test.go +++ b/pkg/query-service/app/logs/v4/query_builder_test.go @@ -277,10 +277,10 @@ func Test_buildAttributeFilter(t *testing.T) { Type: v3.AttributeKeyTypeResource, }, Operator: v3.FilterOperatorLike, - Value: "test", + Value: "test%", }, }, - want: "resources_string['service.name'] LIKE 'test'", + want: "resources_string['service.name'] LIKE 'test%'", }, { name: "build attribute filter like-body", diff --git a/pkg/query-service/app/logs/v4/resource_query_builder.go b/pkg/query-service/app/logs/v4/resource_query_builder.go index 12d6c1a36a..2a56549b43 100644 --- a/pkg/query-service/app/logs/v4/resource_query_builder.go +++ b/pkg/query-service/app/logs/v4/resource_query_builder.go @@ -23,8 +23,13 @@ func buildResourceFilter(logsOp string, key string, op v3.FilterOperator, value return fmt.Sprintf(logsOp, searchKey, chFmtVal) case v3.FilterOperatorContains, v3.FilterOperatorNotContains: // this is required as clickhouseFormattedValue add's quotes to the string + // we also want to treat %, _ as literals for contains escapedStringValue := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", value)) return fmt.Sprintf("%s %s '%%%s%%'", searchKey, logsOp, escapedStringValue) + case v3.FilterOperatorLike, v3.FilterOperatorNotLike: + // this is required as clickhouseFormattedValue add's quotes to the string + escapedStringValue := utils.QuoteEscapedString(fmt.Sprintf("%s", value)) + return fmt.Sprintf("%s %s '%s'", searchKey, logsOp, escapedStringValue) default: return fmt.Sprintf("%s %s %s", searchKey, logsOp, chFmtVal) } @@ 
-74,13 +79,19 @@ func buildIndexFilterForInOperator(key string, op v3.FilterOperator, value inter // example:= x like '%john%' = labels like '%x%john%' func buildResourceIndexFilter(key string, op v3.FilterOperator, value interface{}) string { // not using clickhouseFormattedValue as we don't wan't the quotes - formattedValueEscaped := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", value)) + strVal := fmt.Sprintf("%s", value) + formattedValueEscapedForContains := utils.QuoteEscapedStringForContains(strVal) + formattedValueEscaped := utils.QuoteEscapedString(strVal) // add index filters switch op { - case v3.FilterOperatorContains, v3.FilterOperatorEqual, v3.FilterOperatorLike: + case v3.FilterOperatorContains: + return fmt.Sprintf("labels like '%%%s%%%s%%'", key, formattedValueEscapedForContains) + case v3.FilterOperatorNotContains: + return fmt.Sprintf("labels not like '%%%s%%%s%%'", key, formattedValueEscapedForContains) + case v3.FilterOperatorLike, v3.FilterOperatorEqual: return fmt.Sprintf("labels like '%%%s%%%s%%'", key, formattedValueEscaped) - case v3.FilterOperatorNotContains, v3.FilterOperatorNotEqual, v3.FilterOperatorNotLike: + case v3.FilterOperatorNotLike, v3.FilterOperatorNotEqual: return fmt.Sprintf("labels not like '%%%s%%%s%%'", key, formattedValueEscaped) case v3.FilterOperatorNotRegex: return fmt.Sprintf("labels not like '%%%s%%'", key) diff --git a/pkg/query-service/app/logs/v4/resource_query_builder_test.go b/pkg/query-service/app/logs/v4/resource_query_builder_test.go index 130fd9e98c..e315f739a3 100644 --- a/pkg/query-service/app/logs/v4/resource_query_builder_test.go +++ b/pkg/query-service/app/logs/v4/resource_query_builder_test.go @@ -61,9 +61,9 @@ func Test_buildResourceFilter(t *testing.T) { logsOp: "=", key: "service.name", op: v3.FilterOperatorEqual, - value: "Application", + value: "Application%", }, - want: `simpleJSONExtractString(labels, 'service.name') = 'Application'`, + want: `simpleJSONExtractString(labels, 'service.name') = 'Application%'`, }, { name: "test value with quotes", @@ -75,6 +75,16 @@ func Test_buildResourceFilter(t *testing.T) { }, want: `simpleJSONExtractString(labels, 'service.name') = 'Application\'s'`, }, + { + name: "test like", + args: args{ + logsOp: "LIKE", + key: "service.name", + op: v3.FilterOperatorLike, + value: "Application%_", + }, + want: `simpleJSONExtractString(labels, 'service.name') LIKE 'Application%_'`, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -119,9 +129,9 @@ func Test_buildIndexFilterForInOperator(t *testing.T) { args: args{ key: "service.name", op: v3.FilterOperatorIn, - value: "application", + value: "application%", }, - want: `(labels like '%"service.name":"application"%')`, + want: `(labels like '%"service.name":"application\%"%')`, }, { name: "test nin string", @@ -180,6 +190,15 @@ func Test_buildResourceIndexFilter(t *testing.T) { }, want: `labels not like '%service.name%application\%\_test%'`, }, + { + name: "test like with % and _", + args: args{ + key: "service.name", + op: v3.FilterOperatorLike, + value: "application%_test", + }, + want: `labels like '%service.name%application%_test%'`, + }, { name: "test not regex", args: args{ From e203276678e11b0ef0cd5af4a9dd86e2db137b14 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Thu, 19 Sep 2024 23:02:32 +0530 Subject: [PATCH 47/79] chore: improve colors for the log line indicators (#6032) --- .../LogStateIndicator/LogStateIndicator.styles.scss | 11 +++-------- frontend/src/container/LogsExplorerChart/utils.ts | 11 
+++++++---- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.styles.scss b/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.styles.scss index 61870abc71..2260bf5aa3 100644 --- a/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.styles.scss +++ b/frontend/src/components/Logs/LogStateIndicator/LogStateIndicator.styles.scss @@ -22,26 +22,21 @@ } &.INFO { - background-color: var(--bg-slate-400); + background-color: var(--bg-robin-500); } - &.WARNING, &.WARN { background-color: var(--bg-amber-500); } - &.ERROR { background-color: var(--bg-cherry-500); } - &.TRACE { - background-color: var(--bg-robin-300); + background-color: var(--bg-forest-400); } - &.DEBUG { - background-color: var(--bg-forest-500); + background-color: var(--bg-aqua-500); } - &.FATAL { background-color: var(--bg-sakura-500); } diff --git a/frontend/src/container/LogsExplorerChart/utils.ts b/frontend/src/container/LogsExplorerChart/utils.ts index d052a01585..40253750da 100644 --- a/frontend/src/container/LogsExplorerChart/utils.ts +++ b/frontend/src/container/LogsExplorerChart/utils.ts @@ -9,15 +9,18 @@ export function getColorsForSeverityLabels( const lowerCaseLabel = label.toLowerCase(); if (lowerCaseLabel.includes(`{severity_text="trace"}`)) { - return Color.BG_ROBIN_300; + return Color.BG_FOREST_400; } if (lowerCaseLabel.includes(`{severity_text="debug"}`)) { - return Color.BG_FOREST_500; + return Color.BG_AQUA_500; } - if (lowerCaseLabel.includes(`{severity_text="info"}`)) { - return Color.BG_SLATE_400; + if ( + lowerCaseLabel.includes(`{severity_text="info"}`) || + lowerCaseLabel.includes(`{severity_text=""}`) + ) { + return Color.BG_ROBIN_500; } if (lowerCaseLabel.includes(`{severity_text="warn"}`)) { From 4edc6dbeae834162a64ddce2f575f4d61244f913 Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Thu, 19 Sep 2024 23:21:31 +0530 Subject: [PATCH 48/79] feat: add last option to alert condition match type (#5929) --- frontend/public/locales/en-GB/alerts.json | 1 + frontend/public/locales/en-GB/rules.json | 1 + frontend/public/locales/en/alerts.json | 1 + frontend/public/locales/en/rules.json | 1 + .../container/FormAlertRules/RuleOptions.tsx | 1 + pkg/query-service/rules/alerting.go | 1 + pkg/query-service/rules/base_rule.go | 21 ++++ .../rules/threshold_rule_test.go | 105 ++++++++++++++++++ 8 files changed, 132 insertions(+) diff --git a/frontend/public/locales/en-GB/alerts.json b/frontend/public/locales/en-GB/alerts.json index a43d04ab59..86f21c8c78 100644 --- a/frontend/public/locales/en-GB/alerts.json +++ b/frontend/public/locales/en-GB/alerts.json @@ -53,6 +53,7 @@ "option_atleastonce": "at least once", "option_onaverage": "on average", "option_intotal": "in total", + "option_last": "last", "option_above": "above", "option_below": "below", "option_equal": "is equal to", diff --git a/frontend/public/locales/en-GB/rules.json b/frontend/public/locales/en-GB/rules.json index 9d55a0ba0f..9ac3641c7a 100644 --- a/frontend/public/locales/en-GB/rules.json +++ b/frontend/public/locales/en-GB/rules.json @@ -40,6 +40,7 @@ "option_atleastonce": "at least once", "option_onaverage": "on average", "option_intotal": "in total", + "option_last": "last", "option_above": "above", "option_below": "below", "option_equal": "is equal to", diff --git a/frontend/public/locales/en/alerts.json b/frontend/public/locales/en/alerts.json index e7ed6232ad..02d20a2977 100644 --- a/frontend/public/locales/en/alerts.json +++ 
b/frontend/public/locales/en/alerts.json @@ -53,6 +53,7 @@ "option_atleastonce": "at least once", "option_onaverage": "on average", "option_intotal": "in total", + "option_last": "last", "option_above": "above", "option_below": "below", "option_equal": "is equal to", diff --git a/frontend/public/locales/en/rules.json b/frontend/public/locales/en/rules.json index 9d55a0ba0f..9ac3641c7a 100644 --- a/frontend/public/locales/en/rules.json +++ b/frontend/public/locales/en/rules.json @@ -40,6 +40,7 @@ "option_atleastonce": "at least once", "option_onaverage": "on average", "option_intotal": "in total", + "option_last": "last", "option_above": "above", "option_below": "below", "option_equal": "is equal to", diff --git a/frontend/src/container/FormAlertRules/RuleOptions.tsx b/frontend/src/container/FormAlertRules/RuleOptions.tsx index 2ef6bba4c0..da265f34cc 100644 --- a/frontend/src/container/FormAlertRules/RuleOptions.tsx +++ b/frontend/src/container/FormAlertRules/RuleOptions.tsx @@ -103,6 +103,7 @@ function RuleOptions({ {t('option_allthetimes')} {t('option_onaverage')} {t('option_intotal')} + {t('option_last')} ); diff --git a/pkg/query-service/rules/alerting.go b/pkg/query-service/rules/alerting.go index cb5205f99e..77c9fbe219 100644 --- a/pkg/query-service/rules/alerting.go +++ b/pkg/query-service/rules/alerting.go @@ -112,6 +112,7 @@ const ( AllTheTimes MatchType = "2" OnAverage MatchType = "3" InTotal MatchType = "4" + Last MatchType = "5" ) type RuleCondition struct { diff --git a/pkg/query-service/rules/base_rule.go b/pkg/query-service/rules/base_rule.go index a108938b1d..b82aab91b5 100644 --- a/pkg/query-service/rules/base_rule.go +++ b/pkg/query-service/rules/base_rule.go @@ -483,6 +483,27 @@ func (r *BaseRule) shouldAlert(series v3.Series) (Sample, bool) { shouldAlert = true } } + case Last: + // If the last sample matches the condition, the rule is firing. 
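+		// e.g. with points [4, 12] and target 10, only the last value (12) is compared, so ValueIsAbove fires.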
+ shouldAlert = false + alertSmpl = Sample{Point: Point{V: series.Points[len(series.Points)-1].Value}, Metric: lblsNormalized, MetricOrig: lbls} + if r.compareOp() == ValueIsAbove { + if series.Points[len(series.Points)-1].Value > r.targetVal() { + shouldAlert = true + } + } else if r.compareOp() == ValueIsBelow { + if series.Points[len(series.Points)-1].Value < r.targetVal() { + shouldAlert = true + } + } else if r.compareOp() == ValueIsEq { + if series.Points[len(series.Points)-1].Value == r.targetVal() { + shouldAlert = true + } + } else if r.compareOp() == ValueIsNotEq { + if series.Points[len(series.Points)-1].Value != r.targetVal() { + shouldAlert = true + } + } } return alertSmpl, shouldAlert } diff --git a/pkg/query-service/rules/threshold_rule_test.go b/pkg/query-service/rules/threshold_rule_test.go index 65d020d25f..8f9554db52 100644 --- a/pkg/query-service/rules/threshold_rule_test.go +++ b/pkg/query-service/rules/threshold_rule_test.go @@ -677,6 +677,111 @@ func TestThresholdRuleShouldAlert(t *testing.T) { matchType: "4", // InTotal target: 20.0, }, + // Test cases for Last + // greater than last + { + values: v3.Series{ + Points: []v3.Point{ + {Value: 10.0}, + {Value: 10.0}, + }, + }, + expectAlert: true, + compareOp: "1", // Greater Than + matchType: "5", // Last + target: 5.0, + expectedAlertSample: v3.Point{Value: 10.0}, + }, + { + values: v3.Series{ + Points: []v3.Point{ + {Value: 10.0}, + {Value: 10.0}, + }, + }, + expectAlert: false, + compareOp: "1", // Greater Than + matchType: "5", // Last + target: 20.0, + }, + // less than last + { + values: v3.Series{ + Points: []v3.Point{ + {Value: 10.0}, + {Value: 10.0}, + }, + }, + expectAlert: true, + compareOp: "2", // Less Than + matchType: "5", // Last + target: 15.0, + expectedAlertSample: v3.Point{Value: 10.0}, + }, + { + values: v3.Series{ + Points: []v3.Point{ + {Value: 10.0}, + {Value: 10.0}, + }, + }, + expectAlert: false, + compareOp: "2", // Less Than + matchType: "5", // Last + target: 5.0, + }, + // equals last + { + values: v3.Series{ + Points: []v3.Point{ + {Value: 10.0}, + {Value: 10.0}, + }, + }, + expectAlert: true, + compareOp: "3", // Equals + matchType: "5", // Last + target: 10.0, + expectedAlertSample: v3.Point{Value: 10.0}, + }, + { + values: v3.Series{ + Points: []v3.Point{ + {Value: 10.0}, + {Value: 10.0}, + }, + }, + expectAlert: false, + compareOp: "3", // Equals + matchType: "5", // Last + target: 5.0, + }, + // not equals last + { + values: v3.Series{ + Points: []v3.Point{ + {Value: 10.0}, + {Value: 10.0}, + }, + }, + expectAlert: true, + compareOp: "4", // Not Equals + matchType: "5", // Last + target: 5.0, + expectedAlertSample: v3.Point{Value: 10.0}, + }, + { + values: v3.Series{ + Points: []v3.Point{ + {Value: 10.0}, + {Value: 10.0}, + }, + }, + expectAlert: false, + compareOp: "4", // Not Equals + matchType: "5", // Last + target: 10.0, + }, } fm := featureManager.StartManager() From 54d5666b92020ea5a4afebe8ef2832e620e4d8e7 Mon Sep 17 00:00:00 2001 From: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com> Date: Fri, 20 Sep 2024 11:39:10 +0530 Subject: [PATCH 49/79] fix: fixed dashboard header and list title alignment (#6035) * fix: fixed dashboard header and list title alignment * fix: fixed dashboard header and list title alignment * fix: fixed existing styles --- .../ListOfDashboard/DashboardList.styles.scss | 10 +++++++-- .../ListOfDashboard/DashboardsList.tsx | 22 ++++++++++--------- .../Description.styles.scss | 6 ++++- .../DashboardDescription/index.tsx | 7 ++---- 4 files 
changed, 27 insertions(+), 18 deletions(-) diff --git a/frontend/src/container/ListOfDashboard/DashboardList.styles.scss b/frontend/src/container/ListOfDashboard/DashboardList.styles.scss index cf9ec283d2..6a5a148180 100644 --- a/frontend/src/container/ListOfDashboard/DashboardList.styles.scss +++ b/frontend/src/container/ListOfDashboard/DashboardList.styles.scss @@ -64,9 +64,9 @@ .dashboard-icon { display: inline-block; - margin-top: 4px; - margin-right: 4px; line-height: 20px; + height: 14px; + width: 14px; } .dot { @@ -75,6 +75,12 @@ border-radius: 50%; } + .title-link { + display: flex; + align-items: center; + gap: 8px; + } + .title { color: var(--bg-vanilla-100); font-size: var(--font-size-sm); diff --git a/frontend/src/container/ListOfDashboard/DashboardsList.tsx b/frontend/src/container/ListOfDashboard/DashboardsList.tsx index c131119a6c..9908374a1b 100644 --- a/frontend/src/container/ListOfDashboard/DashboardsList.tsx +++ b/frontend/src/container/ListOfDashboard/DashboardsList.tsx @@ -459,17 +459,19 @@ function DashboardsList(): JSX.Element { placement="left" overlayClassName="title-toolip" > - - - dashboard-image + + dashboard-image + {dashboard.name} - - + +
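Stepping back to the like/ilike fix in patch 46 above: the rule it enforces is that contains/ncontains must treat the user's `%` and `_` as literal characters (the builder wraps the value in `%...%` itself), while like/nlike must leave them alone so they keep their wildcard meaning; single quotes and backslashes are escaped in both cases. A self-contained Go sketch of that distinction; the two helpers mirror what `utils.QuoteEscapedString` and `utils.QuoteEscapedStringForContains` evidently do, inferred from the tests in the patch rather than copied from the SigNoz source:

```go
package main

import (
	"fmt"
	"strings"
)

// quoteEscaped makes a value safe inside a single-quoted ClickHouse string:
// backslashes and single quotes are escaped, while % and _ pass through so
// LIKE patterns keep their wildcards.
func quoteEscaped(v string) string {
	v = strings.ReplaceAll(v, `\`, `\\`) // escape backslashes first
	return strings.ReplaceAll(v, `'`, `\'`)
}

// quoteEscapedForContains additionally escapes % and _, because a contains
// filter wraps the value in %...% and must match the input literally.
func quoteEscapedForContains(v string) string {
	v = quoteEscaped(v)
	v = strings.ReplaceAll(v, `%`, `\%`)
	return strings.ReplaceAll(v, `_`, `\_`)
}

func main() {
	val := "application%_test"
	// contains: user's % and _ become literals inside the %...% wrapper
	fmt.Printf("body ILIKE '%%%s%%'\n", quoteEscapedForContains(val))
	// like: user's % and _ remain wildcards
	fmt.Printf("attr LIKE '%s'\n", quoteEscaped(val))
}
```

Run against the patch's own test value, the first line prints `body ILIKE '%application\%\_test%'` and the second `attr LIKE 'application%_test'`, matching the expected SQL in the tests.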
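And on the new `last` match type from patch 48 above: unlike "at least once", "all the times", "on average", and "in total", it looks only at the final point of the evaluated series. A stand-alone Go condensation of the `case Last:` branch added to `shouldAlert`; the operator names are simplified stand-ins for the rule package's constants:

```go
package main

import "fmt"

type compareOp int

const (
	valueIsAbove compareOp = iota
	valueIsBelow
	valueIsEq
	valueIsNotEq
)

// lastMatches reports whether a series alerts under the "last" match type:
// only the final point is compared against the target.
func lastMatches(points []float64, op compareOp, target float64) bool {
	if len(points) == 0 {
		return false
	}
	last := points[len(points)-1]
	switch op {
	case valueIsAbove:
		return last > target
	case valueIsBelow:
		return last < target
	case valueIsEq:
		return last == target
	case valueIsNotEq:
		return last != target
	}
	return false
}

func main() {
	points := []float64{10.0, 10.0}
	fmt.Println(lastMatches(points, valueIsAbove, 5.0))  // true, as in the patch's test table
	fmt.Println(lastMatches(points, valueIsAbove, 20.0)) // false
}
```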
diff --git a/frontend/src/container/NewDashboard/DashboardDescription/Description.styles.scss b/frontend/src/container/NewDashboard/DashboardDescription/Description.styles.scss index d82c2da7b6..0f4b2dcc95 100644 --- a/frontend/src/container/NewDashboard/DashboardDescription/Description.styles.scss +++ b/frontend/src/container/NewDashboard/DashboardDescription/Description.styles.scss @@ -130,12 +130,16 @@ .left-section { display: flex; - flex-wrap: wrap; align-items: center; gap: 8px; width: 45%; + .dashboard-img { + height: 16px; + width: 16px; + } + .dashboard-title { color: #fff; font-family: Inter; diff --git a/frontend/src/container/NewDashboard/DashboardDescription/index.tsx b/frontend/src/container/NewDashboard/DashboardDescription/index.tsx index 31b5e4c247..ea59dc4bcf 100644 --- a/frontend/src/container/NewDashboard/DashboardDescription/index.tsx +++ b/frontend/src/container/NewDashboard/DashboardDescription/index.tsx @@ -306,16 +306,13 @@ function DashboardDescription(props: DashboardDescriptionProps): JSX.Element {
+ dashboard-img 30 ? title : ''}> - dashboard-img{' '} + {' '} {title} From ced72f86a4b5684bf3988cec1f2efd3fcaaead06 Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Fri, 20 Sep 2024 13:27:18 +0530 Subject: [PATCH 50/79] doc: add info on request dashboard to contributing md (#6040) --- CONTRIBUTING.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 632a4e98e8..613b225353 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -38,7 +38,7 @@ Also, have a look at these [good first issues label](https://github.com/SigNoz/s ## 1.1 For Creating Issue(s) Before making any significant changes and before filing a new issue, please check [existing open](https://github.com/SigNoz/signoz/issues?q=is%3Aopen+is%3Aissue), or [recently closed](https://github.com/SigNoz/signoz/issues?q=is%3Aissue+is%3Aclosed) issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. -**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy) +**Issue Types** - [Bug Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) | [Feature Request](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) | [Performance Issue Report](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=performance-issue-report.md&title=) | [Request Dashboard](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+) | [Report a Security Vulnerability](https://github.com/SigNoz/signoz/security/policy) #### Details like these are incredibly useful: @@ -57,7 +57,7 @@ Before making any significant changes and before filing a new issue, please chec Discussing your proposed changes ahead of time will make the contribution process smooth for everyone 🙌. - **[`^top^`](#)** + **[`^top^`](#contributing-guidelines)**
@@ -98,13 +98,14 @@ GitHub provides additional document on [forking a repository](https://help.githu stability and quality of the component. -You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [SLACK](https://signoz.io/slack). +You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack community](https://signoz.io/slack). ### Pointers: - If you find any **bugs** → please create an [**issue.**](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=bug_report.md&title=) - If you find anything **missing** in documentation → you can create an issue with the label **`documentation`**. - If you want to build any **new feature** → please create an [issue with the label **`enhancement`**.](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=&template=feature_request.md&title=) - If you want to **discuss** something about the product, start a new [**discussion**.](https://github.com/SigNoz/signoz/discussions) +- If you want to request a new **dashboard template** → please create an issue [here](https://github.com/SigNoz/signoz/issues/new?assignees=&labels=dashboard-template&projects=&template=request_dashboard.md&title=%5BDashboard+Request%5D+).
@@ -118,7 +119,7 @@ e.g. If you are submitting a fix for an issue in frontend, the PR name should be - Feel free to ping us on [`#contributing`](https://signoz-community.slack.com/archives/C01LWQ8KS7M) or [`#contributing-frontend`](https://signoz-community.slack.com/archives/C027134DM8B) on our slack community if you need any help on this :) - **[`^top^`](#)** + **[`^top^`](#contributing-guidelines)**
@@ -128,14 +129,13 @@ e.g. If you are submitting a fix for an issue in frontend, the PR name should be - [**Frontend**](#3-develop-frontend-) (Written in Typescript, React) - [**Backend**](#4-contribute-to-backend-query-service-) (Query Service, written in Go) +- [**Dashboard Templates**](#6-contribute-to-dashboards-) (JSON dashboard templates built with SigNoz) Depending upon your area of expertise & interest, you can choose one or more to contribute. Below are detailed instructions to contribute in each area. -**Please note:** If you want to work on an issue, please ask the maintainers to assign the issue to you before starting work on it. This would help us understand who is working on an issue and prevent duplicate work. 🙏🏻 +**Please note:** If you want to work on an issue, please add a brief description of your solution on the issue before starting work on it. -⚠️ If you just raise a PR, without the corresponding issue being assigned to you - it may not be accepted. - - **[`^top^`](#)** + **[`^top^`](#contributing-guidelines)**
@@ -189,7 +189,7 @@ Also, have a look at [Frontend README.md](https://github.com/SigNoz/signoz/blob/ ### Important Notes: The Maintainers / Contributors who will change Line Numbers of `Frontend` & `Query-Section`, please update line numbers in [`/.scripts/commentLinesForSetup.sh`](https://github.com/SigNoz/signoz/blob/develop/.scripts/commentLinesForSetup.sh) - **[`^top^`](#)** + **[`^top^`](#contributing-guidelines)** ## 3.2 Contribute to Frontend without installing SigNoz backend @@ -210,7 +210,7 @@ Please ping us in the [`#contributing`](https://signoz-community.slack.com/archi **Frontend should now be accessible at** [`http://localhost:3301/services`](http://localhost:3301/services) - **[`^top^`](#)** + **[`^top^`](#contributing-guidelines)**
@@ -310,7 +310,7 @@ Click the button below. A workspace with all required environments will be creat > To use it on your forked repo, edit the 'Open in Gitpod' button URL to `https://gitpod.io/#https://github.com//signoz` --> - **[`^top^`](#)** + **[`^top^`](#contributing-guidelines)**
@@ -366,7 +366,7 @@ curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod- | HOTROD_NAMESPACE=sample-application bash ``` - **[`^top^`](#)** + **[`^top^`](#contributing-guidelines)** --- From cb1cd3555b3b63bdb441512dacdebf2599db67d7 Mon Sep 17 00:00:00 2001 From: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:36:35 +0530 Subject: [PATCH 51/79] feat: added global search on table panel (#5893) * feat: added global search on table panel * feat: added global search on table panel * feat: added global search conditionally and with new design * feat: removed state from datasource * feat: added global search in full view * feat: added lightMode styles * feat: added test cases for querytable and widgetHeader - global search --- .../FullView/WidgetFullView.styles.scss | 19 + .../GridCard/FullView/index.tsx | 25 +- .../GridCard/WidgetGraphComponent.tsx | 4 + .../WidgetHeader/WidgetHeader.styles.scss | 23 +- .../GridCardLayout/WidgetHeader/index.tsx | 124 ++- .../src/container/GridTableComponent/types.ts | 1 + .../container/PanelWrapper/PanelWrapper.tsx | 2 + .../PanelWrapper/TablePanelWrapper.tsx | 2 + .../PanelWrapper/panelWrapper.types.ts | 1 + .../QueryTable/QueryTable.intefaces.ts | 1 + .../src/container/QueryTable/QueryTable.tsx | 31 +- .../QueryTable/__test__/QueryTable.test.tsx | 73 ++ .../container/QueryTable/__test__/mocks.ts | 797 ++++++++++++++++++ 13 files changed, 1056 insertions(+), 47 deletions(-) create mode 100644 frontend/src/container/QueryTable/__test__/QueryTable.test.tsx create mode 100644 frontend/src/container/QueryTable/__test__/mocks.ts diff --git a/frontend/src/container/GridCardLayout/GridCard/FullView/WidgetFullView.styles.scss b/frontend/src/container/GridCardLayout/GridCard/FullView/WidgetFullView.styles.scss index 29d578f096..78c4459929 100644 --- a/frontend/src/container/GridCardLayout/GridCard/FullView/WidgetFullView.styles.scss +++ b/frontend/src/container/GridCardLayout/GridCard/FullView/WidgetFullView.styles.scss @@ -15,6 +15,13 @@ box-sizing: border-box; margin: 16px 0; border-radius: 3px; + + .global-search { + .ant-input-group-addon { + border: none; + background-color: var(--bg-ink-300); + } + } } .height-widget { @@ -55,3 +62,15 @@ } } } + +.lightMode { + .full-view-container { + .graph-container { + .global-search { + .ant-input-group-addon { + background-color: var(--bg-vanilla-200); + } + } + } + } +} diff --git a/frontend/src/container/GridCardLayout/GridCard/FullView/index.tsx b/frontend/src/container/GridCardLayout/GridCard/FullView/index.tsx index 974a35a39c..d682af12a8 100644 --- a/frontend/src/container/GridCardLayout/GridCard/FullView/index.tsx +++ b/frontend/src/container/GridCardLayout/GridCard/FullView/index.tsx @@ -1,7 +1,11 @@ import './WidgetFullView.styles.scss'; -import { LoadingOutlined, SyncOutlined } from '@ant-design/icons'; -import { Button, Spin } from 'antd'; +import { + LoadingOutlined, + SearchOutlined, + SyncOutlined, +} from '@ant-design/icons'; +import { Button, Input, Spin } from 'antd'; import cx from 'classnames'; import { ToggleGraphProps } from 'components/Graph/types'; import Spinner from 'components/Spinner'; @@ -172,6 +176,10 @@ function FullView({ const isListView = widget.panelTypes === PANEL_TYPES.LIST; + const isTablePanel = widget.panelTypes === PANEL_TYPES.TABLE; + + const [searchTerm, setSearchTerm] = useState(''); + if (response.isLoading && widget.panelTypes !== PANEL_TYPES.LIST) { return ; } @@ -216,6 +224,18 @@ function FullView({ }} 
isGraphLegendToggleAvailable={canModifyChart} > + {isTablePanel && ( + } + className="global-search" + placeholder="Search..." + allowClear + key={widget.id} + onChange={(e): void => { + setSearchTerm(e.target.value || ''); + }} + /> + )}
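The search input added above only lifts `searchTerm` into component state; the filtering itself happens in QueryTable's `onTableSearch` later in this patch, which keeps a row when any of its stringified column values contains the term, case-insensitively, and treats an empty term as match-all. The same predicate, sketched in Go with a simplified stand-in for the frontend's `RowData` shape:

```go
package main

import (
	"fmt"
	"strings"
)

// filterRows mirrors onTableSearch: keep a row when any of its values,
// formatted as a string, contains the search term ignoring case. An empty
// term keeps every row, since strings.Contains(x, "") is always true.
func filterRows(rows []map[string]any, term string) []map[string]any {
	term = strings.ToLower(term)
	var kept []map[string]any
	for _, row := range rows {
		for _, v := range row {
			if strings.Contains(strings.ToLower(fmt.Sprint(v)), term) {
				kept = append(kept, row)
				break // one matching column is enough to keep the row
			}
		}
	}
	return kept
}

func main() {
	rows := []map[string]any{
		{"service_name": "redis", "operation": "GetDriver", "A": 11.5},
		{"service_name": "frontend", "operation": "HTTP GET", "A": 10.13},
		{"service_name": "frontend", "operation": "HTTP GET: /route", "A": 9.21},
	}
	for _, r := range filterRows(rows, "FRONTEND") {
		fmt.Println(r["operation"])
	}
}
```

Because both sides are lowercased, a search for `FRONTEND` keeps the two `frontend` rows here, mirroring the row counts the QueryTable test in this patch asserts for its search term.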
diff --git a/frontend/src/container/GridCardLayout/GridCard/WidgetGraphComponent.tsx b/frontend/src/container/GridCardLayout/GridCard/WidgetGraphComponent.tsx index b76c7c9f73..4d5c7fa94c 100644 --- a/frontend/src/container/GridCardLayout/GridCard/WidgetGraphComponent.tsx +++ b/frontend/src/container/GridCardLayout/GridCard/WidgetGraphComponent.tsx @@ -234,6 +234,8 @@ function WidgetGraphComponent({ }); }; + const [searchTerm, setSearchTerm] = useState(''); + const loadingState = (queryResponse.isLoading || queryResponse.status === 'idle') && widget.panelTypes !== PANEL_TYPES.LIST; @@ -317,6 +319,7 @@ function WidgetGraphComponent({ isWarning={isWarning} isFetchingResponse={isFetchingResponse} tableProcessedDataRef={tableProcessedDataRef} + setSearchTerm={setSearchTerm} /> {queryResponse.isLoading && widget.panelTypes !== PANEL_TYPES.LIST && ( @@ -337,6 +340,7 @@ function WidgetGraphComponent({ onDragSelect={onDragSelect} tableProcessedDataRef={tableProcessedDataRef} customTooltipElement={customTooltipElement} + searchTerm={searchTerm} /> )} diff --git a/frontend/src/container/GridCardLayout/WidgetHeader/WidgetHeader.styles.scss b/frontend/src/container/GridCardLayout/WidgetHeader/WidgetHeader.styles.scss index 2fcb3e8e6f..11659e9a3e 100644 --- a/frontend/src/container/GridCardLayout/WidgetHeader/WidgetHeader.styles.scss +++ b/frontend/src/container/GridCardLayout/WidgetHeader/WidgetHeader.styles.scss @@ -2,7 +2,7 @@ display: flex; justify-content: space-between; align-items: center; - height: 30px; + height: 36px; width: 100%; padding: 0.5rem; box-sizing: border-box; @@ -10,6 +10,14 @@ font-weight: 600; cursor: move; + + .ant-input-group-addon { + border: none; + background-color: var(--bg-ink-500); + } + .search-header-icons { + cursor: pointer; + } } .widget-header-title { @@ -19,6 +27,7 @@ .widget-header-actions { display: flex; align-items: center; + gap: 8px; } .widget-header-more-options { visibility: hidden; @@ -30,6 +39,10 @@ padding: 8px; } +.widget-header-more-options-visible { + visibility: visible; +} + .widget-header-hover { visibility: visible; } @@ -37,3 +50,11 @@ .widget-api-actions { padding-right: 0.25rem; } + +.lightMode { + .widget-header-container { + .ant-input-group-addon { + background-color: inherit; + } + } +} diff --git a/frontend/src/container/GridCardLayout/WidgetHeader/index.tsx b/frontend/src/container/GridCardLayout/WidgetHeader/index.tsx index 7daa4e553d..d4aa6a4c09 100644 --- a/frontend/src/container/GridCardLayout/WidgetHeader/index.tsx +++ b/frontend/src/container/GridCardLayout/WidgetHeader/index.tsx @@ -9,9 +9,10 @@ import { ExclamationCircleOutlined, FullscreenOutlined, MoreOutlined, + SearchOutlined, WarningOutlined, } from '@ant-design/icons'; -import { Dropdown, MenuProps, Tooltip, Typography } from 'antd'; +import { Dropdown, Input, MenuProps, Tooltip, Typography } from 'antd'; import Spinner from 'components/Spinner'; import { QueryParams } from 'constants/query'; import { PANEL_TYPES } from 'constants/queryBuilder'; @@ -20,8 +21,9 @@ import useComponentPermission from 'hooks/useComponentPermission'; import history from 'lib/history'; import { RowData } from 'lib/query/createTableColumnsFromQuery'; import { isEmpty } from 'lodash-es'; +import { X } from 'lucide-react'; import { unparse } from 'papaparse'; -import { ReactNode, useCallback, useMemo } from 'react'; +import { ReactNode, useCallback, useMemo, useState } from 'react'; import { UseQueryResult } from 'react-query'; import { useSelector } from 'react-redux'; import { AppState } from 
'store/reducers'; @@ -51,6 +53,7 @@ interface IWidgetHeaderProps { isWarning: boolean; isFetchingResponse: boolean; tableProcessedDataRef: React.MutableRefObject; + setSearchTerm: React.Dispatch>; } function WidgetHeader({ @@ -67,6 +70,7 @@ function WidgetHeader({ isWarning, isFetchingResponse, tableProcessedDataRef, + setSearchTerm, }: IWidgetHeaderProps): JSX.Element | null { const onEditHandler = useCallback((): void => { const widgetId = widget.id; @@ -187,6 +191,10 @@ function WidgetHeader({ const updatedMenuList = useMemo(() => generateMenuList(actions), [actions]); + const [showGlobalSearch, setShowGlobalSearch] = useState(false); + + const globalSearchAvailable = widget.panelTypes === PANEL_TYPES.TABLE; + const menu = useMemo( () => ({ items: updatedMenuList, @@ -201,46 +209,80 @@ function WidgetHeader({ return (
- - {title} - -
-
{threshold}
- {isFetchingResponse && !queryResponse.isError && ( - - )} - {queryResponse.isError && ( - } + placeholder="Search..." + bordered={false} + data-testid="widget-header-search-input" + autoFocus + addonAfter={ + { + e.stopPropagation(); + e.preventDefault(); + setShowGlobalSearch(false); + }} + className="search-header-icons" + /> + } + key={widget.id} + onChange={(e): void => { + setSearchTerm(e.target.value || ''); + }} + /> + ) : ( + <> + - - - )} - - {isWarning && ( - - - - )} - - - -
+ {title} + +
+
{threshold}
+ {isFetchingResponse && !queryResponse.isError && ( + + )} + {queryResponse.isError && ( + + + + )} + + {isWarning && ( + + + + )} + {globalSearchAvailable && ( + setShowGlobalSearch(true)} + data-testid="widget-header-search" + /> + )} + + + +
+ + )}
); } diff --git a/frontend/src/container/GridTableComponent/types.ts b/frontend/src/container/GridTableComponent/types.ts index 6088f9dcb8..883e280b38 100644 --- a/frontend/src/container/GridTableComponent/types.ts +++ b/frontend/src/container/GridTableComponent/types.ts @@ -14,6 +14,7 @@ export type GridTableComponentProps = { columnUnits?: ColumnUnit; tableProcessedDataRef?: React.MutableRefObject; sticky?: TableProps['sticky']; + searchTerm?: string; } & Pick & Omit, 'columns' | 'dataSource'>; diff --git a/frontend/src/container/PanelWrapper/PanelWrapper.tsx b/frontend/src/container/PanelWrapper/PanelWrapper.tsx index ed105b3948..2f5b35485e 100644 --- a/frontend/src/container/PanelWrapper/PanelWrapper.tsx +++ b/frontend/src/container/PanelWrapper/PanelWrapper.tsx @@ -16,6 +16,7 @@ function PanelWrapper({ selectedGraph, tableProcessedDataRef, customTooltipElement, + searchTerm, }: PanelWrapperProps): JSX.Element { const Component = PanelTypeVsPanelWrapper[ selectedGraph || widget.panelTypes @@ -39,6 +40,7 @@ function PanelWrapper({ selectedGraph={selectedGraph} tableProcessedDataRef={tableProcessedDataRef} customTooltipElement={customTooltipElement} + searchTerm={searchTerm} /> ); } diff --git a/frontend/src/container/PanelWrapper/TablePanelWrapper.tsx b/frontend/src/container/PanelWrapper/TablePanelWrapper.tsx index 0eab4143a2..c5222e8d53 100644 --- a/frontend/src/container/PanelWrapper/TablePanelWrapper.tsx +++ b/frontend/src/container/PanelWrapper/TablePanelWrapper.tsx @@ -8,6 +8,7 @@ function TablePanelWrapper({ widget, queryResponse, tableProcessedDataRef, + searchTerm, }: PanelWrapperProps): JSX.Element { const panelData = (queryResponse.data?.payload?.data?.result?.[0] as any)?.table || []; @@ -20,6 +21,7 @@ function TablePanelWrapper({ columnUnits={widget.columnUnits} tableProcessedDataRef={tableProcessedDataRef} sticky={widget.panelTypes === PANEL_TYPES.TABLE} + searchTerm={searchTerm} // eslint-disable-next-line react/jsx-props-no-spreading {...GRID_TABLE_CONFIG} /> diff --git a/frontend/src/container/PanelWrapper/panelWrapper.types.ts b/frontend/src/container/PanelWrapper/panelWrapper.types.ts index 7d5e3122e8..4778ffdb97 100644 --- a/frontend/src/container/PanelWrapper/panelWrapper.types.ts +++ b/frontend/src/container/PanelWrapper/panelWrapper.types.ts @@ -23,6 +23,7 @@ export type PanelWrapperProps = { onDragSelect: (start: number, end: number) => void; selectedGraph?: PANEL_TYPES; tableProcessedDataRef?: React.MutableRefObject; + searchTerm?: string; customTooltipElement?: HTMLDivElement; }; diff --git a/frontend/src/container/QueryTable/QueryTable.intefaces.ts b/frontend/src/container/QueryTable/QueryTable.intefaces.ts index 7576d796ec..254e4885e7 100644 --- a/frontend/src/container/QueryTable/QueryTable.intefaces.ts +++ b/frontend/src/container/QueryTable/QueryTable.intefaces.ts @@ -19,4 +19,5 @@ export type QueryTableProps = Omit< columns?: ColumnsType; dataSource?: RowData[]; sticky?: TableProps['sticky']; + searchTerm?: string; }; diff --git a/frontend/src/container/QueryTable/QueryTable.tsx b/frontend/src/container/QueryTable/QueryTable.tsx index 1786e5d4e3..e438070173 100644 --- a/frontend/src/container/QueryTable/QueryTable.tsx +++ b/frontend/src/container/QueryTable/QueryTable.tsx @@ -3,8 +3,11 @@ import './QueryTable.styles.scss'; import { ResizeTable } from 'components/ResizeTable'; import Download from 'container/Download/Download'; import { IServiceName } from 'container/MetricsApplication/Tabs/types'; -import { createTableColumnsFromQuery } from 
'lib/query/createTableColumnsFromQuery'; -import { useMemo } from 'react'; +import { + createTableColumnsFromQuery, + RowData, +} from 'lib/query/createTableColumnsFromQuery'; +import { useCallback, useEffect, useMemo, useState } from 'react'; import { useParams } from 'react-router-dom'; import { QueryTableProps } from './QueryTable.intefaces'; @@ -20,6 +23,7 @@ export function QueryTable({ columns, dataSource, sticky, + searchTerm, ...props }: QueryTableProps): JSX.Element { const { isDownloadEnabled = false, fileName = '' } = downloadOption || {}; @@ -55,6 +59,27 @@ export function QueryTable({ hideOnSinglePage: true, }; + const [filterTable, setFilterTable] = useState(null); + + const onTableSearch = useCallback( + (value?: string): void => { + const filterTable = newDataSource.filter((o) => + Object.keys(o).some((k) => + String(o[k]) + .toLowerCase() + .includes(value?.toLowerCase() || ''), + ), + ); + + setFilterTable(filterTable); + }, + [newDataSource], + ); + + useEffect(() => { + onTableSearch(searchTerm); + }, [newDataSource, onTableSearch, searchTerm]); + return (
{isDownloadEnabled && ( @@ -69,7 +94,7 @@ export function QueryTable({ ({ + ...jest.requireActual('react-router-dom'), + useLocation: (): { pathname: string } => ({ + pathname: ``, + }), +})); + +// Mock useDashabord hook +jest.mock('providers/Dashboard/Dashboard', () => ({ + useDashboard: (): any => ({ + selectedDashboard: { + data: { + variables: [], + }, + }, + }), +})); + +describe('QueryTable -', () => { + it('should render correctly with all the data rows', () => { + const { container } = render(); + const tableRows = container.querySelectorAll('tr.ant-table-row'); + expect(tableRows.length).toBe(QueryTableProps.queryTableData.rows.length); + }); + + it('should render correctly with searchTerm', () => { + const { container } = render( + , + ); + const tableRows = container.querySelectorAll('tr.ant-table-row'); + expect(tableRows.length).toBe(3); + }); +}); + +const setSearchTerm = jest.fn(); +describe('WidgetHeader -', () => { + it('global search option should be working', () => { + const { getByText, getByTestId } = render( + , + ); + expect(getByText('Table - Panel')).toBeInTheDocument(); + const searchWidget = getByTestId('widget-header-search'); + expect(searchWidget).toBeInTheDocument(); + // click and open the search input + fireEvent.click(searchWidget); + // check if input is opened + const searchInput = getByTestId('widget-header-search-input'); + expect(searchInput).toBeInTheDocument(); + + // enter search term + fireEvent.change(searchInput, { target: { value: 'frontend' } }); + // check if search term is set + expect(setSearchTerm).toHaveBeenCalledWith('frontend'); + expect(searchInput).toHaveValue('frontend'); + }); + + it('global search should not be present for non-table panel', () => { + const { queryByTestId } = render( + , + ); + expect(queryByTestId('widget-header-search')).not.toBeInTheDocument(); + }); +}); diff --git a/frontend/src/container/QueryTable/__test__/mocks.ts b/frontend/src/container/QueryTable/__test__/mocks.ts new file mode 100644 index 0000000000..abdb7bcfe3 --- /dev/null +++ b/frontend/src/container/QueryTable/__test__/mocks.ts @@ -0,0 +1,797 @@ +/* eslint-disable sonarjs/no-duplicate-string */ +export const QueryTableProps: any = { + props: { + loading: false, + size: 'small', + }, + queryTableData: { + columns: [ + { + name: 'resource_host_name', + queryName: '', + isValueColumn: false, + }, + { + name: 'service_name', + queryName: '', + isValueColumn: false, + }, + { + name: 'operation', + queryName: '', + isValueColumn: false, + }, + { + name: 'A', + queryName: 'A', + isValueColumn: true, + }, + ], + rows: [ + { + data: { + A: 11.5, + operation: 'GetDriver', + resource_host_name: 'test-hs-name', + service_name: 'redis', + }, + }, + { + data: { + A: 10.13, + operation: 'HTTP GET', + resource_host_name: 'test-hs-name', + service_name: 'frontend', + }, + }, + { + data: { + A: 9.21, + operation: 'HTTP GET /route', + resource_host_name: 'test-hs-name', + service_name: 'route', + }, + }, + { + data: { + A: 9.21, + operation: 'HTTP GET: /route', + resource_host_name: 'test-hs-name', + service_name: 'frontend', + }, + }, + { + data: { + A: 0.92, + operation: 'HTTP GET: /customer', + resource_host_name: 'test-hs-name', + service_name: 'frontend', + }, + }, + { + data: { + A: 0.92, + operation: 'SQL SELECT', + resource_host_name: 'test-hs-name', + service_name: 'mysql', + }, + }, + { + data: { + A: 0.92, + operation: 'HTTP GET /customer', + resource_host_name: 'test-hs-name', + service_name: 'customer', + }, + }, + ], + }, + query: { + builder: { + 
queryData: [ + { + aggregateAttribute: { + dataType: 'float64', + id: 'signoz_calls_total--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'signoz_calls_total', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: 'metrics', + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: 'string', + id: 'resource_host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'resource_host_name', + type: 'tag', + }, + { + dataType: 'string', + id: 'service_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'service_name', + type: 'tag', + }, + { + dataType: 'string', + id: 'operation--string--tag--false', + isColumn: false, + isJSON: false, + key: 'operation', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '1e08128f-c6a3-42ff-8033-4e38d291cf0a', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: 'builder', + }, + columns: [ + { + dataIndex: 'resource_host_name', + title: 'resource_host_name', + width: 145, + }, + { + dataIndex: 'service_name', + title: 'service_name', + width: 145, + }, + { + dataIndex: 'operation', + title: 'operation', + width: 145, + }, + { + dataIndex: 'A', + title: 'A', + width: 145, + }, + ], + dataSource: [ + { + A: 11.5, + operation: 'GetDriver', + resource_host_name: 'test-hs-name', + service_name: 'redis', + }, + { + A: 10.13, + operation: 'HTTP GET', + resource_host_name: 'test-hs-name', + service_name: 'frontend', + }, + { + A: 9.21, + operation: 'HTTP GET /route', + resource_host_name: 'test-hs-name', + service_name: 'route', + }, + { + A: 9.21, + operation: 'HTTP GET: /route', + resource_host_name: 'test-hs-name', + service_name: 'frontend', + }, + { + A: 0.92, + operation: 'HTTP GET: /customer', + resource_host_name: 'test-hs-name', + service_name: 'frontend', + }, + { + A: 0.92, + operation: 'SQL SELECT', + resource_host_name: 'test-hs-name', + service_name: 'mysql', + }, + { + A: 0.92, + operation: 'HTTP GET /customer', + resource_host_name: 'test-hs-name', + service_name: 'customer', + }, + ], + sticky: true, + searchTerm: '', +}; + +export const WidgetHeaderProps: any = { + title: 'Table - Panel', + widget: { + bucketCount: 30, + bucketWidth: 0, + columnUnits: {}, + description: '', + fillSpans: false, + id: 'add65f0d-7662-4024-af51-da567759235d', + isStacked: false, + mergeAllActiveQueries: false, + nullZeroValues: 'zero', + opacity: '1', + panelTypes: 'table', + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: 'float64', + id: 'signoz_calls_total--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'signoz_calls_total', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: 'metrics', + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: 'string', + id: 'resource_host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'resource_host_name', + type: 'tag', + }, + { + dataType: 'string', + id: 'service_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'service_name', + type: 'tag', + }, + { + dataType: 'string', + id: 'operation--string--tag--false', + 
isColumn: false, + isJSON: false, + key: 'operation', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '1e08128f-c6a3-42ff-8033-4e38d291cf0a', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: 'builder', + }, + selectedLogFields: [ + { + dataType: 'string', + name: 'body', + type: '', + }, + { + dataType: 'string', + name: 'timestamp', + type: '', + }, + ], + selectedTracesFields: [ + { + dataType: 'string', + id: 'serviceName--string--tag--true', + isColumn: true, + isJSON: false, + key: 'serviceName', + type: 'tag', + }, + { + dataType: 'string', + id: 'name--string--tag--true', + isColumn: true, + isJSON: false, + key: 'name', + type: 'tag', + }, + { + dataType: 'float64', + id: 'durationNano--float64--tag--true', + isColumn: true, + isJSON: false, + key: 'durationNano', + type: 'tag', + }, + { + dataType: 'string', + id: 'httpMethod--string--tag--true', + isColumn: true, + isJSON: false, + key: 'httpMethod', + type: 'tag', + }, + { + dataType: 'string', + id: 'responseStatusCode--string--tag--true', + isColumn: true, + isJSON: false, + key: 'responseStatusCode', + type: 'tag', + }, + ], + softMax: 0, + softMin: 0, + stackedBarChart: false, + thresholds: [], + timePreferance: 'GLOBAL_TIME', + title: 'Table - Panel', + yAxisUnit: 'none', + }, + parentHover: false, + queryResponse: { + status: 'success', + isLoading: false, + isSuccess: true, + isError: false, + isIdle: false, + data: { + statusCode: 200, + error: null, + message: 'success', + payload: { + status: 'success', + data: { + resultType: '', + result: [ + { + table: { + columns: [ + { + name: 'resource_host_name', + queryName: '', + isValueColumn: false, + }, + { + name: 'service_name', + queryName: '', + isValueColumn: false, + }, + { + name: 'operation', + queryName: '', + isValueColumn: false, + }, + { + name: 'A', + queryName: 'A', + isValueColumn: true, + }, + ], + rows: [ + { + data: { + A: 11.67, + operation: 'GetDriver', + resource_host_name: '4f6ec470feea', + service_name: 'redis', + }, + }, + { + data: { + A: 10.26, + operation: 'HTTP GET', + resource_host_name: '4f6ec470feea', + service_name: 'frontend', + }, + }, + { + data: { + A: 9.33, + operation: 'HTTP GET: /route', + resource_host_name: '4f6ec470feea', + service_name: 'frontend', + }, + }, + { + data: { + A: 9.33, + operation: 'HTTP GET /route', + resource_host_name: '4f6ec470feea', + service_name: 'route', + }, + }, + { + data: { + A: 0.93, + operation: 'FindDriverIDs', + resource_host_name: '4f6ec470feea', + service_name: 'redis', + }, + }, + { + data: { + A: 0.93, + operation: 'HTTP GET: /customer', + resource_host_name: '4f6ec470feea', + service_name: 'frontend', + }, + }, + { + data: { + A: 0.93, + operation: '/driver.DriverService/FindNearest', + resource_host_name: '4f6ec470feea', + service_name: 'driver', + }, + }, + { + data: { + A: 0.93, + operation: '/driver.DriverService/FindNearest', + resource_host_name: '4f6ec470feea', + service_name: 'frontend', + }, + }, + { + data: { + A: 0.93, + operation: 'SQL SELECT', + resource_host_name: '4f6ec470feea', + service_name: 'mysql', + }, + }, + { + data: { + A: 0.93, + operation: 'HTTP GET /customer', + resource_host_name: '4f6ec470feea', + service_name: 'customer', + }, + }, + { + data: 
{ + A: 0.93, + operation: 'HTTP GET /dispatch', + resource_host_name: '4f6ec470feea', + service_name: 'frontend', + }, + }, + { + data: { + A: 0.21, + operation: 'check_request limit', + resource_host_name: '', + service_name: 'demo-app', + }, + }, + { + data: { + A: 0.21, + operation: 'authenticate_check_cache', + resource_host_name: '', + service_name: 'demo-app', + }, + }, + { + data: { + A: 0.21, + operation: 'authenticate_check_db', + resource_host_name: '', + service_name: 'demo-app', + }, + }, + { + data: { + A: 0.21, + operation: 'authenticate', + resource_host_name: '', + service_name: 'demo-app', + }, + }, + { + data: { + A: 0.21, + operation: 'check cart in cache', + resource_host_name: '', + service_name: 'demo-app', + }, + }, + { + data: { + A: 0.2, + operation: 'get_cart', + resource_host_name: '', + service_name: 'demo-app', + }, + }, + { + data: { + A: 0.2, + operation: 'check cart in db', + resource_host_name: '', + service_name: 'demo-app', + }, + }, + { + data: { + A: 0.2, + operation: 'home', + resource_host_name: '', + service_name: 'demo-app', + }, + }, + ], + }, + }, + ], + }, + }, + params: { + start: 1726669030000, + end: 1726670830000, + step: 60, + variables: {}, + formatForWeb: true, + compositeQuery: { + queryType: 'builder', + panelType: 'table', + fillGaps: false, + builderQueries: { + A: { + aggregateAttribute: { + dataType: 'float64', + id: 'signoz_calls_total--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'signoz_calls_total', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: 'metrics', + disabled: false, + expression: 'A', + filters: { + items: [], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: 'string', + id: 'resource_host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'resource_host_name', + type: 'tag', + }, + { + dataType: 'string', + id: 'service_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'service_name', + type: 'tag', + }, + { + dataType: 'string', + id: 'operation--string--tag--false', + isColumn: false, + isJSON: false, + key: 'operation', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + }, + }, + }, + }, + dataUpdatedAt: 1726670830710, + error: null, + errorUpdatedAt: 0, + failureCount: 0, + errorUpdateCount: 0, + isFetched: true, + isFetchedAfterMount: true, + isFetching: false, + isRefetching: false, + isLoadingError: false, + isPlaceholderData: false, + isPreviousData: false, + isRefetchError: false, + isStale: true, + }, + headerMenuList: ['view', 'clone', 'delete', 'edit'], + isWarning: false, + isFetchingResponse: false, + tableProcessedDataRef: { + current: [ + { + resource_host_name: '4f6ec470feea', + service_name: 'redis', + operation: 'GetDriver', + A: 11.67, + }, + { + resource_host_name: '4f6ec470feea', + service_name: 'frontend', + operation: 'HTTP GET', + A: 10.26, + }, + { + resource_host_name: '4f6ec470feea', + service_name: 'frontend', + operation: 'HTTP GET: /route', + A: 9.33, + }, + { + resource_host_name: '4f6ec470feea', + service_name: 'route', + operation: 'HTTP GET /route', + A: 9.33, + }, + { + resource_host_name: '4f6ec470feea', + service_name: 'redis', + operation: 'FindDriverIDs', + A: 0.93, + }, + { + resource_host_name: '4f6ec470feea', + service_name: 'frontend', + operation: 'HTTP GET: /customer', + A: 0.93, + }, + { + resource_host_name: '4f6ec470feea', + service_name: 
'driver', + operation: '/driver.DriverService/FindNearest', + A: 0.93, + }, + { + resource_host_name: '4f6ec470feea', + service_name: 'frontend', + operation: '/driver.DriverService/FindNearest', + A: 0.93, + }, + { + resource_host_name: '4f6ec470feea', + service_name: 'mysql', + operation: 'SQL SELECT', + A: 0.93, + }, + { + resource_host_name: '4f6ec470feea', + service_name: 'customer', + operation: 'HTTP GET /customer', + A: 0.93, + }, + { + resource_host_name: '4f6ec470feea', + service_name: 'frontend', + operation: 'HTTP GET /dispatch', + A: 0.93, + }, + { + resource_host_name: '', + service_name: 'demo-app', + operation: 'check_request limit', + A: 0.21, + }, + { + resource_host_name: '', + service_name: 'demo-app', + operation: 'authenticate_check_cache', + A: 0.21, + }, + { + resource_host_name: '', + service_name: 'demo-app', + operation: 'authenticate_check_db', + A: 0.21, + }, + { + resource_host_name: '', + service_name: 'demo-app', + operation: 'authenticate', + A: 0.21, + }, + { + resource_host_name: '', + service_name: 'demo-app', + operation: 'check cart in cache', + A: 0.21, + }, + { + resource_host_name: '', + service_name: 'demo-app', + operation: 'get_cart', + A: 0.2, + }, + { + resource_host_name: '', + service_name: 'demo-app', + operation: 'check cart in db', + A: 0.2, + }, + { + resource_host_name: '', + service_name: 'demo-app', + operation: 'home', + A: 0.2, + }, + ], + }, +}; From f6d3f95768385f43694c5ea1c02ac701c4e79711 Mon Sep 17 00:00:00 2001 From: Nityananda Gohain Date: Fri, 20 Sep 2024 18:02:33 +0530 Subject: [PATCH 52/79] fix: telemetry for dashboard/alerts/views using contains on attributes (#6034) * fix: telemetry for dashboard/alerts/views using contains on attributes * fix: update how telemetry is collected for logs * fix: revert constants * fix: check assertion for operator --- pkg/query-service/app/dashboards/model.go | 61 +++++++++++++++++++---- pkg/query-service/app/explorer/db.go | 13 +++++ pkg/query-service/model/response.go | 4 ++ pkg/query-service/rules/db.go | 28 +++++++++-- pkg/query-service/telemetry/telemetry.go | 3 ++ 5 files changed, 94 insertions(+), 15 deletions(-) diff --git a/pkg/query-service/app/dashboards/model.go b/pkg/query-service/app/dashboards/model.go index 989d266b51..21d39fcbe4 100644 --- a/pkg/query-service/app/dashboards/model.go +++ b/pkg/query-service/app/dashboards/model.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "regexp" + "slices" "strings" "time" @@ -453,7 +454,6 @@ func GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error) { totalDashboardsWithPanelAndName := 0 var dashboardNames []string count := 0 - logChQueriesCount := 0 for _, dashboard := range dashboardsData { if isDashboardWithPanelAndName(dashboard.Data) { totalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName + 1 @@ -466,19 +466,18 @@ func GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error) { dashboardsInfo.LogsBasedPanels += dashboardInfo.LogsBasedPanels dashboardsInfo.TracesBasedPanels += dashboardInfo.TracesBasedPanels dashboardsInfo.MetricBasedPanels += dashboardInfo.MetricBasedPanels + dashboardsInfo.LogsPanelsWithAttrContainsOp += dashboardInfo.LogsPanelsWithAttrContainsOp + dashboardsInfo.DashboardsWithLogsChQuery += dashboardInfo.DashboardsWithLogsChQuery if isDashboardWithTSV2(dashboard.Data) { count = count + 1 } - if isDashboardWithLogsClickhouseQuery(dashboard.Data) { - logChQueriesCount = logChQueriesCount + 1 - } + // check if the dashboard has a log operator with contains }
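
The heart of this patch is the checkLogPanelAttrContains helper added a few hunks below. Stripped of the widget-JSON plumbing, it reduces to the sketch here; the flat string maps stand in for the real filter-item JSON, and the names countAttrContainsOps and containsLikeOps are invented for the illustration. One hedged caveat: the committed helper asserts the item key to map[string]string, while json.Unmarshal decodes JSON objects to map[string]interface{}, so that inner branch may never match in practice.

    package main

    import (
        "fmt"
        "slices"
    )

    // containsLikeOps is the operator set the patch scans for.
    var containsLikeOps = []string{"contains", "ncontains", "like", "nlike"}

    // countAttrContainsOps counts filter items that apply a contains-like
    // operator to an attribute other than the log body.
    func countAttrContainsOps(items []map[string]string) int {
        count := 0
        for _, item := range items {
            if slices.Contains(containsLikeOps, item["op"]) && item["key"] != "body" {
                count++
            }
        }
        return count
    }

    func main() {
        items := []map[string]string{
            {"key": "body", "op": "contains"},          // not counted: body search
            {"key": "k8s.pod.name", "op": "ncontains"}, // counted: attribute contains
            {"key": "severity_text", "op": "="},        // not counted: exact match
        }
        fmt.Println(countAttrContainsOps(items)) // prints 1
    }
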
dashboardsInfo.DashboardNames = dashboardNames dashboardsInfo.TotalDashboards = len(dashboardsData) dashboardsInfo.TotalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName dashboardsInfo.QueriesWithTSV2 = count - dashboardsInfo.DashboardsWithLogsChQuery = logChQueriesCount return &dashboardsInfo, nil } @@ -495,8 +494,8 @@ func isDashboardWithLogsClickhouseQuery(data map[string]interface{}) bool { if err != nil { return false } - result := strings.Contains(string(jsonData), "signoz_logs.distributed_logs ") || - strings.Contains(string(jsonData), "signoz_logs.logs ") + result := strings.Contains(string(jsonData), "signoz_logs.distributed_logs") || + strings.Contains(string(jsonData), "signoz_logs.logs") return result } @@ -532,11 +531,38 @@ func extractDashboardName(data map[string]interface{}) string { return "" } -func countPanelsInDashboard(data map[string]interface{}) model.DashboardsInfo { - var logsPanelCount, tracesPanelCount, metricsPanelCount int +func checkLogPanelAttrContains(data map[string]interface{}) int { + var logsPanelsWithAttrContains int + filters, ok := data["filters"].(map[string]interface{}) + if ok && filters["items"] != nil { + items, ok := filters["items"].([]interface{}) + if ok { + for _, item := range items { + itemMap, ok := item.(map[string]interface{}) + if ok { + opStr, ok := itemMap["op"].(string) + if ok { + if slices.Contains([]string{"contains", "ncontains", "like", "nlike"}, opStr) { + // check if it's not body + key, ok := itemMap["key"].(map[string]string) + if ok && key["key"] != "body" { + logsPanelsWithAttrContains++ + } + } + } + } + } + } + } + return logsPanelsWithAttrContains +} + +func countPanelsInDashboard(inputData map[string]interface{}) model.DashboardsInfo { + var logsPanelCount, tracesPanelCount, metricsPanelCount, logsPanelsWithAttrContains int + var logChQuery bool // totalPanels := 0 - if data != nil && data["widgets"] != nil { - widgets, ok := data["widgets"] + if inputData != nil && inputData["widgets"] != nil { + widgets, ok := inputData["widgets"] if ok { data, ok := widgets.([]interface{}) if ok { @@ -559,20 +585,33 @@ func countPanelsInDashboard(data map[string]interface{}) model.DashboardsInfo { metricsPanelCount++ } else if data["dataSource"] == "logs" { logsPanelCount++ + logsPanelsWithAttrContains += checkLogPanelAttrContains(data) } } } } } + } else if ok && query["queryType"] == "clickhouse_sql" && query["clickhouse_sql"] != nil { + if isDashboardWithLogsClickhouseQuery(inputData) { + logChQuery = true + } } } } } } } + + logChQueryCount := 0 + if logChQuery { + logChQueryCount = 1 + } return model.DashboardsInfo{ LogsBasedPanels: logsPanelCount, TracesBasedPanels: tracesPanelCount, MetricBasedPanels: metricsPanelCount, + + DashboardsWithLogsChQuery: logChQueryCount, + LogsPanelsWithAttrContainsOp: logsPanelsWithAttrContains, } } diff --git a/pkg/query-service/app/explorer/db.go b/pkg/query-service/app/explorer/db.go index 140b0b48d8..c53345e65b 100644 --- a/pkg/query-service/app/explorer/db.go +++ b/pkg/query-service/app/explorer/db.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "slices" "strings" "time" @@ -247,6 +248,18 @@ func GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error) { savedViewsInfo.TracesSavedViews += 1 } else if view.SourcePage == "logs" { savedViewsInfo.LogsSavedViews += 1 + + for _, query := range view.CompositeQuery.BuilderQueries { + if query.Filters != nil { + for _, item := range query.Filters.Items { + if slices.Contains([]string{"contains", "ncontains", 
"like", "nlike"}, string(item.Operator)) { + if item.Key.Key != "body" { + savedViewsInfo.LogsSavedViewWithContainsOp += 1 + } + } + } + } + } } } return &savedViewsInfo, nil diff --git a/pkg/query-service/model/response.go b/pkg/query-service/model/response.go index 03e538879c..3a720aed5e 100644 --- a/pkg/query-service/model/response.go +++ b/pkg/query-service/model/response.go @@ -632,12 +632,15 @@ type AlertsInfo struct { AlertNames []string `json:"alertNames"` AlertsWithTSV2 int `json:"alertsWithTSv2"` AlertsWithLogsChQuery int `json:"alertsWithLogsChQuery"` + AlertsWithLogsContainsOp int `json:"alertsWithLogsContainsOp"` } type SavedViewsInfo struct { TotalSavedViews int `json:"totalSavedViews"` TracesSavedViews int `json:"tracesSavedViews"` LogsSavedViews int `json:"logsSavedViews"` + + LogsSavedViewWithContainsOp int `json:"logsSavedViewWithContainsOp"` } type DashboardsInfo struct { @@ -649,6 +652,7 @@ type DashboardsInfo struct { DashboardNames []string `json:"dashboardNames"` QueriesWithTSV2 int `json:"queriesWithTSV2"` DashboardsWithLogsChQuery int `json:"dashboardsWithLogsChQuery"` + LogsPanelsWithAttrContainsOp int `json:"logsPanelsWithAttrContainsOp"` } type TagTelemetryData struct { diff --git a/pkg/query-service/rules/db.go b/pkg/query-service/rules/db.go index e6f8d6301c..697ea63f92 100644 --- a/pkg/query-service/rules/db.go +++ b/pkg/query-service/rules/db.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "slices" "strconv" "strings" "time" @@ -551,10 +552,6 @@ func (r *ruleDB) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error) { if strings.Contains(alert, "time_series_v2") { alertsInfo.AlertsWithTSV2 = alertsInfo.AlertsWithTSV2 + 1 } - if strings.Contains(alert, "signoz_logs.distributed_logs") || - strings.Contains(alert, "signoz_logs.logs") { - alertsInfo.AlertsWithLogsChQuery = alertsInfo.AlertsWithLogsChQuery + 1 - } err = json.Unmarshal([]byte(alert), &rule) if err != nil { zap.L().Error("invalid rule data", zap.Error(err)) @@ -563,6 +560,29 @@ func (r *ruleDB) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error) { alertNames = append(alertNames, rule.AlertName) if rule.AlertType == AlertTypeLogs { alertsInfo.LogsBasedAlerts = alertsInfo.LogsBasedAlerts + 1 + + if rule.RuleCondition != nil && rule.RuleCondition.CompositeQuery != nil { + if rule.RuleCondition.CompositeQuery.QueryType == v3.QueryTypeClickHouseSQL { + if strings.Contains(alert, "signoz_logs.distributed_logs") || + strings.Contains(alert, "signoz_logs.logs") { + alertsInfo.AlertsWithLogsChQuery = alertsInfo.AlertsWithLogsChQuery + 1 + } + } + } + + for _, query := range rule.RuleCondition.CompositeQuery.BuilderQueries { + if rule.RuleCondition.CompositeQuery.QueryType == v3.QueryTypeBuilder { + if query.Filters != nil { + for _, item := range query.Filters.Items { + if slices.Contains([]string{"contains", "ncontains", "like", "nlike"}, string(item.Operator)) { + if item.Key.Key != "body" { + alertsInfo.AlertsWithLogsContainsOp += 1 + } + } + } + } + } + } } else if rule.AlertType == AlertTypeMetric { alertsInfo.MetricBasedAlerts = alertsInfo.MetricBasedAlerts + 1 if rule.RuleCondition != nil && rule.RuleCondition.CompositeQuery != nil { diff --git a/pkg/query-service/telemetry/telemetry.go b/pkg/query-service/telemetry/telemetry.go index be6ad4719c..62ff020281 100644 --- a/pkg/query-service/telemetry/telemetry.go +++ b/pkg/query-service/telemetry/telemetry.go @@ -333,6 +333,7 @@ func createTelemetry() { "dashboardNames": dashboardsInfo.DashboardNames, "alertNames": 
alertsInfo.AlertNames, "logsBasedPanels": dashboardsInfo.LogsBasedPanels, + "logsPanelsWithAttrContains": dashboardsInfo.LogsPanelsWithAttrContainsOp, "metricBasedPanels": dashboardsInfo.MetricBasedPanels, "tracesBasedPanels": dashboardsInfo.TracesBasedPanels, "dashboardsWithTSV2": dashboardsInfo.QueriesWithTSV2, @@ -346,6 +347,7 @@ func createTelemetry() { "totalSavedViews": savedViewsInfo.TotalSavedViews, "logsSavedViews": savedViewsInfo.LogsSavedViews, "tracesSavedViews": savedViewsInfo.TracesSavedViews, + "logSavedViewsWithContainsOp": savedViewsInfo.LogsSavedViewWithContainsOp, "slackChannels": alertsInfo.SlackChannels, "webHookChannels": alertsInfo.WebHookChannels, "pagerDutyChannels": alertsInfo.PagerDutyChannels, @@ -357,6 +359,7 @@ func createTelemetry() { "metricsPrometheusQueries": alertsInfo.MetricsPrometheusQueries, "spanMetricsPrometheusQueries": alertsInfo.SpanMetricsPrometheusQueries, "alertsWithLogsChQuery": alertsInfo.AlertsWithLogsChQuery, + "alertsWithLogsContainsOp": alertsInfo.AlertsWithLogsContainsOp, } // send event only if there are dashboards or alerts or channels if (dashboardsInfo.TotalDashboards > 0 || alertsInfo.TotalAlerts > 0 || alertsInfo.TotalChannels > 0 || savedViewsInfo.TotalSavedViews > 0) && apiErr == nil { From 0218f701b2611a62e4a92f3d0cecf22fb4fca4b6 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Fri, 20 Sep 2024 18:12:16 +0530 Subject: [PATCH 53/79] fix: alert links are broken when there is a space in a value (#6043) * fix: spaces in values being converted to + signs in links generated for alerts * fix: added inline comment * fix: added inline comment --- .../src/hooks/queryBuilder/useGetCompositeQueryParam.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/frontend/src/hooks/queryBuilder/useGetCompositeQueryParam.ts b/frontend/src/hooks/queryBuilder/useGetCompositeQueryParam.ts index 442531a15b..efef00022a 100644 --- a/frontend/src/hooks/queryBuilder/useGetCompositeQueryParam.ts +++ b/frontend/src/hooks/queryBuilder/useGetCompositeQueryParam.ts @@ -13,7 +13,11 @@ export const useGetCompositeQueryParam = (): Query | null => { try { if (!compositeQuery) return null; - parsedCompositeQuery = JSON.parse(decodeURIComponent(compositeQuery)); + // MDN reference - https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#decoding_query_parameters_from_a_url + // MDN reference on preserving + characters during encoding - https://developer.mozilla.org/en-US/docs/Web/API/URLSearchParams#preserving_plus_signs + parsedCompositeQuery = JSON.parse( + decodeURIComponent(compositeQuery.replace(/\+/g, ' ')), + ); } catch (e) { parsedCompositeQuery = null; } From 4aabfe7cf5d87e339a5ecdbda164bf15b9c35df2 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Fri, 20 Sep 2024 18:13:55 +0530 Subject: [PATCH 54/79] fix: invalidate the cache for alert rules after the update call (#6046) --- frontend/src/container/FormAlertRules/index.tsx | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/frontend/src/container/FormAlertRules/index.tsx b/frontend/src/container/FormAlertRules/index.tsx index f53a6b2cfe..2947b2a0b3 100644 --- a/frontend/src/container/FormAlertRules/index.tsx +++ b/frontend/src/container/FormAlertRules/index.tsx @@ -370,7 +370,10 @@ function FormAlertRules({ }); // invalidate rule in cache - ruleCache.invalidateQueries([REACT_QUERY_KEY.ALERT_RULE_DETAILS, ruleId]); + ruleCache.invalidateQueries([ + REACT_QUERY_KEY.ALERT_RULE_DETAILS, + `${ruleId}`, + ]); // 
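
The root cause behind the PATCH 53 fix: URLSearchParams-style serialization encodes spaces as '+', while decodeURIComponent only understands percent escapes, so a '+' survives decoding and corrupts the JSON that follows. A minimal Go sketch of the same pitfall, using only net/url from the standard library; the raw value is invented, url.PathUnescape plays the role of decodeURIComponent here (percent escapes only, '+' untouched), and url.QueryUnescape is the query-aware variant.

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    func main() {
        // A hypothetical composite-query fragment whose spaces were
        // encoded as '+' by query-string serialization.
        raw := "%7B%22value%22%3A%22frontend+service%22%7D"

        // Percent-decoding alone leaves '+' in place, which is exactly
        // what JavaScript's decodeURIComponent does.
        onlyPercent, _ := url.PathUnescape(raw)
        fmt.Println(onlyPercent) // {"value":"frontend+service"}

        // Mapping '+' back to a space first mirrors the frontend fix
        // (compositeQuery.replace(/\+/g, ' ')).
        fixed, _ := url.PathUnescape(strings.ReplaceAll(raw, "+", " "))
        fmt.Println(fixed) // {"value":"frontend service"}

        // Query-aware decoding handles '+' natively.
        queryAware, _ := url.QueryUnescape(raw)
        fmt.Println(queryAware) // {"value":"frontend service"}
    }
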
eslint-disable-next-line sonarjs/no-identical-functions setTimeout(() => { From 033b64a62a1995bac1441b8fd616ec5e06ba29cc Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Fri, 20 Sep 2024 19:23:01 +0530 Subject: [PATCH 55/79] chore: add support for caching multiple time ranges for cache key (#6008) --- pkg/query-service/app/querier/helper.go | 188 +++------- pkg/query-service/app/querier/querier.go | 219 ++---------- pkg/query-service/app/querier/querier_test.go | 170 ++++++--- pkg/query-service/app/querier/v2/helper.go | 126 ++----- pkg/query-service/app/querier/v2/querier.go | 230 ++---------- .../app/querier/v2/querier_test.go | 174 ++++++--- pkg/query-service/common/query_range.go | 34 ++ pkg/query-service/interfaces/interface.go | 6 + .../querycache/query_range_cache.go | 225 ++++++++++++ .../querycache/query_range_cache_test.go | 336 ++++++++++++++++++ 10 files changed, 964 insertions(+), 744 deletions(-) create mode 100644 pkg/query-service/querycache/query_range_cache.go create mode 100644 pkg/query-service/querycache/query_range_cache_test.go diff --git a/pkg/query-service/app/querier/helper.go b/pkg/query-service/app/querier/helper.go index a4ccfd047a..00b287ce8e 100644 --- a/pkg/query-service/app/querier/helper.go +++ b/pkg/query-service/app/querier/helper.go @@ -2,20 +2,19 @@ package querier import ( "context" - "encoding/json" "fmt" "strings" "sync" - "time" logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3" logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4" metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3" tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" - "go.signoz.io/signoz/pkg/query-service/cache/status" + "go.signoz.io/signoz/pkg/query-service/common" "go.signoz.io/signoz/pkg/query-service/constants" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.signoz.io/signoz/pkg/query-service/postprocess" + "go.signoz.io/signoz/pkg/query-service/querycache" "go.uber.org/zap" ) @@ -107,7 +106,8 @@ func (q *querier) runBuilderQuery( if builderQuery.DataSource == v3.DataSourceLogs { var query string var err error - if _, ok := cacheKeys[queryName]; !ok { + if _, ok := cacheKeys[queryName]; !ok || params.NoCache { + zap.L().Info("skipping cache for logs query", zap.String("queryName", queryName), zap.Int64("start", start), zap.Int64("end", end), zap.Int64("step", builderQuery.StepInterval), zap.Bool("noCache", params.NoCache), zap.String("cacheKey", cacheKeys[queryName])) query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, start, end, builderQuery, params, preferRPM) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil} @@ -118,21 +118,11 @@ func (q *querier) runBuilderQuery( return } - cacheKey := cacheKeys[queryName] - var cachedData []byte - if !params.NoCache && q.cache != nil { - var retrieveStatus status.RetrieveStatus - data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) - if err == nil { - cachedData = data - } - } - misses, replaceCachedData := q.findMissingTimeRanges(start, end, builderQuery.StepInterval, cachedData) - missedSeries := make([]*v3.Series, 0) - cachedSeries := make([]*v3.Series, 0) + misses := q.queryCache.FindMissingTimeRanges(start, end, builderQuery.StepInterval, cacheKeys[queryName]) + zap.L().Info("cache misses for logs query", zap.Any("misses", misses)) + missedSeries := make([]querycache.CachedSeriesData, 0) for _, miss := range misses { - query, err = 
prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.start, miss.end, builderQuery, params, preferRPM) + query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.Start, miss.End, builderQuery, params, preferRPM) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil} return @@ -147,48 +137,23 @@ func (q *querier) runBuilderQuery( } return } - missedSeries = append(missedSeries, series...) - } - if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { - zap.L().Error("error unmarshalling cached data", zap.Error(err)) - } - mergedSeries := mergeSerieses(cachedSeries, missedSeries) - if replaceCachedData { - mergedSeries = missedSeries - } - - var mergedSeriesData []byte - var marshallingErr error - missedSeriesLen := len(missedSeries) - if missedSeriesLen > 0 && !params.NoCache && q.cache != nil { - // caching the data - mergedSeriesData, marshallingErr = json.Marshal(mergedSeries) - if marshallingErr != nil { - zap.L().Error("error marshalling merged series", zap.Error(marshallingErr)) - } + missedSeries = append(missedSeries, querycache.CachedSeriesData{ + Start: miss.Start, + End: miss.End, + Data: series, + }) } + mergedSeries := q.queryCache.MergeWithCachedSeriesData(cacheKeys[queryName], missedSeries) - // response doesn't need everything - filterCachedPoints(mergedSeries, start, end) + resultSeries := common.GetSeriesFromCachedData(mergedSeries, start, end) ch <- channelResult{ Err: nil, Name: queryName, - Series: mergedSeries, - } - - // Cache the seriesList for future queries - if missedSeriesLen > 0 && !params.NoCache && q.cache != nil && marshallingErr == nil { - // caching the data - err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour) - if err != nil { - zap.L().Error("error storing merged series", zap.Error(err)) - return - } + Series: resultSeries, } return - } if builderQuery.DataSource == v3.DataSourceTraces { @@ -242,7 +207,8 @@ func (q *querier) runBuilderQuery( // What is happening here? // We are only caching the graph panel queries. A non-existant cache key means that the query is not cached. // If the query is not cached, we execute the query and return the result without caching it. 
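
The rewritten logs path above shows the shape that the rest of this patch repeats for metrics, expressions and PromQL: ask the query cache for the uncovered sub-ranges, run the underlying query only for those, merge the fresh buckets with the cached ones, then trim to the requested window. A condensed sketch of that flow follows; fetchWithCache and runQuery are invented names, and the method signatures are inferred from the calls visible in this diff.

    package example

    import (
        "context"

        "go.signoz.io/signoz/pkg/query-service/common"
        "go.signoz.io/signoz/pkg/query-service/interfaces"
        v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
        "go.signoz.io/signoz/pkg/query-service/querycache"
    )

    // fetchWithCache sketches the miss-fill-merge-trim flow this patch
    // installs in every builder path.
    func fetchWithCache(
        ctx context.Context,
        qc interfaces.QueryCache,
        cacheKey string,
        start, end, step int64,
        runQuery func(ctx context.Context, start, end int64) ([]*v3.Series, error),
    ) ([]*v3.Series, error) {
        // 1. Ask the cache which sub-ranges of [start, end] are uncovered.
        misses := qc.FindMissingTimeRanges(start, end, step, cacheKey)

        // 2. Execute the underlying query only for those sub-ranges.
        missedSeries := make([]querycache.CachedSeriesData, 0, len(misses))
        for _, miss := range misses {
            series, err := runQuery(ctx, miss.Start, miss.End)
            if err != nil {
                return nil, err
            }
            missedSeries = append(missedSeries, querycache.CachedSeriesData{
                Start: miss.Start,
                End:   miss.End,
                Data:  series,
            })
        }

        // 3. Merge the fresh buckets with what is cached under the key;
        //    since no explicit Store call remains in these paths, the
        //    merge presumably persists the result as well.
        merged := qc.MergeWithCachedSeriesData(cacheKey, missedSeries)

        // 4. Trim the merged buckets down to the requested window.
        return common.GetSeriesFromCachedData(merged, start, end), nil
    }
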
- if _, ok := cacheKeys[queryName]; !ok { + if _, ok := cacheKeys[queryName]; !ok || params.NoCache { + zap.L().Info("skipping cache for metrics query", zap.String("queryName", queryName), zap.Int64("start", start), zap.Int64("end", end), zap.Int64("step", builderQuery.StepInterval), zap.Bool("noCache", params.NoCache), zap.String("cacheKey", cacheKeys[queryName])) query, err := metricsV3.PrepareMetricQuery(start, end, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, metricsV3.Options{PreferRPM: preferRPM}) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil} @@ -254,22 +220,13 @@ func (q *querier) runBuilderQuery( } cacheKey := cacheKeys[queryName] - var cachedData []byte - if !params.NoCache && q.cache != nil { - var retrieveStatus status.RetrieveStatus - data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) - if err == nil { - cachedData = data - } - } - misses, replaceCachedData := q.findMissingTimeRanges(start, end, builderQuery.StepInterval, cachedData) - missedSeries := make([]*v3.Series, 0) - cachedSeries := make([]*v3.Series, 0) + misses := q.queryCache.FindMissingTimeRanges(start, end, builderQuery.StepInterval, cacheKey) + zap.L().Info("cache misses for metrics query", zap.Any("misses", misses)) + missedSeries := make([]querycache.CachedSeriesData, 0) for _, miss := range misses { query, err := metricsV3.PrepareMetricQuery( - miss.start, - miss.end, + miss.Start, + miss.End, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, @@ -294,41 +251,20 @@ func (q *querier) runBuilderQuery( } return } - missedSeries = append(missedSeries, series...) - } - if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { - zap.L().Error("error unmarshalling cached data", zap.Error(err)) - } - mergedSeries := mergeSerieses(cachedSeries, missedSeries) - if replaceCachedData { - mergedSeries = missedSeries - } - var mergedSeriesData []byte - var marshallingErr error - missedSeriesLen := len(missedSeries) - if missedSeriesLen > 0 && !params.NoCache && q.cache != nil { - // caching the data - mergedSeriesData, marshallingErr = json.Marshal(mergedSeries) - if marshallingErr != nil { - zap.L().Error("error marshalling merged series", zap.Error(marshallingErr)) - } + missedSeries = append(missedSeries, querycache.CachedSeriesData{ + Start: miss.Start, + End: miss.End, + Data: series, + }) } + mergedSeries := q.queryCache.MergeWithCachedSeriesData(cacheKey, missedSeries) + + resultSeries := common.GetSeriesFromCachedData(mergedSeries, start, end) - // response doesn't need everything - filterCachedPoints(mergedSeries, start, end) ch <- channelResult{ Err: nil, Name: queryName, - Series: mergedSeries, - } - - // Cache the seriesList for future queries - if missedSeriesLen > 0 && !params.NoCache && q.cache != nil && marshallingErr == nil { - err := q.cache.Store(cacheKey, mergedSeriesData, time.Hour) - if err != nil { - zap.L().Error("error storing merged series", zap.Error(err)) - return - } + Series: resultSeries, } } @@ -350,7 +286,8 @@ func (q *querier) runBuilderExpression( return } - if _, ok := cacheKeys[queryName]; !ok { + if _, ok := cacheKeys[queryName]; !ok || params.NoCache { + zap.L().Info("skipping cache for expression query", zap.String("queryName", queryName), zap.Int64("start", params.Start), zap.Int64("end", params.End), zap.Int64("step", params.Step), zap.Bool("noCache", 
params.NoCache), zap.String("cacheKey", cacheKeys[queryName])) query := queries[queryName] series, err := q.execClickHouseQuery(ctx, query) ch <- channelResult{Err: err, Name: queryName, Query: query, Series: series} @@ -358,23 +295,14 @@ func (q *querier) runBuilderExpression( } cacheKey := cacheKeys[queryName] - var cachedData []byte - if !params.NoCache && q.cache != nil { - var retrieveStatus status.RetrieveStatus - data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) - if err == nil { - cachedData = data - } - } step := postprocess.StepIntervalForFunction(params, queryName) - misses, replaceCachedData := q.findMissingTimeRanges(params.Start, params.End, step, cachedData) - missedSeries := make([]*v3.Series, 0) - cachedSeries := make([]*v3.Series, 0) + misses := q.queryCache.FindMissingTimeRanges(params.Start, params.End, step, cacheKey) + zap.L().Info("cache misses for expression query", zap.Any("misses", misses)) + missedSeries := make([]querycache.CachedSeriesData, 0) for _, miss := range misses { missQueries, _ := q.builder.PrepareQueries(&v3.QueryRangeParamsV3{ - Start: miss.start, - End: miss.end, + Start: miss.Start, + End: miss.End, Step: params.Step, NoCache: params.NoCache, CompositeQuery: params.CompositeQuery, @@ -386,41 +314,19 @@ func (q *querier) runBuilderExpression( ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil} return } - missedSeries = append(missedSeries, series...) - } - if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { - zap.L().Error("error unmarshalling cached data", zap.Error(err)) - } - mergedSeries := mergeSerieses(cachedSeries, missedSeries) - if replaceCachedData { - mergedSeries = missedSeries + missedSeries = append(missedSeries, querycache.CachedSeriesData{ + Start: miss.Start, + End: miss.End, + Data: series, + }) } + mergedSeries := q.queryCache.MergeWithCachedSeriesData(cacheKey, missedSeries) - var mergedSeriesData []byte - missedSeriesLen := len(missedSeries) - var marshallingErr error - if missedSeriesLen > 0 && !params.NoCache && q.cache != nil { - // caching the data - mergedSeriesData, marshallingErr = json.Marshal(mergedSeries) - if marshallingErr != nil { - zap.L().Error("error marshalling merged series", zap.Error(marshallingErr)) - } - } + resultSeries := common.GetSeriesFromCachedData(mergedSeries, params.Start, params.End) - // response doesn't need everything - filterCachedPoints(mergedSeries, params.Start, params.End) ch <- channelResult{ Err: nil, Name: queryName, - Series: mergedSeries, - } - - // Cache the seriesList for future queries - if len(missedSeries) > 0 && !params.NoCache && q.cache != nil && marshallingErr == nil { - err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour) - if err != nil { - zap.L().Error("error storing merged series", zap.Error(err)) - return - } + Series: resultSeries, } } diff --git a/pkg/query-service/app/querier/querier.go b/pkg/query-service/app/querier/querier.go index 50ef63394a..fd7198b334 100644 --- a/pkg/query-service/app/querier/querier.go +++ b/pkg/query-service/app/querier/querier.go @@ -2,11 +2,7 @@ package querier import ( "context" - "encoding/json" "fmt" - "math" - "sort" - "strings" "sync" "time" @@ -15,7 +11,9 @@ import ( metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3" "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" + 
"go.signoz.io/signoz/pkg/query-service/common" chErrors "go.signoz.io/signoz/pkg/query-service/errors" + "go.signoz.io/signoz/pkg/query-service/querycache" "go.signoz.io/signoz/pkg/query-service/utils" "go.signoz.io/signoz/pkg/query-service/cache" @@ -34,14 +32,11 @@ type channelResult struct { Query string } -type missInterval struct { - start, end int64 // in milliseconds -} - type querier struct { cache cache.Cache reader interfaces.Reader keyGenerator cache.KeyGenerator + queryCache interfaces.QueryCache fluxInterval time.Duration @@ -80,8 +75,11 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier { logsQueryBuilder = logsV4.PrepareLogsQuery } + qc := querycache.NewQueryCache(querycache.WithCache(opts.Cache), querycache.WithFluxInterval(opts.FluxInterval)) + return &querier{ cache: opts.Cache, + queryCache: qc, reader: opts.Reader, keyGenerator: opts.KeyGenerator, fluxInterval: opts.FluxInterval, @@ -154,156 +152,6 @@ func (q *querier) execPromQuery(ctx context.Context, params *model.QueryRangePar return seriesList, nil } -// findMissingTimeRanges finds the missing time ranges in the seriesList -// and returns a list of miss structs, It takes the fluxInterval into -// account to find the missing time ranges. -// -// The [End - fluxInterval, End] is always added to the list of misses, because -// the data might still be in flux and not yet available in the database. -// -// replaceCacheData is used to indicate if the cache data should be replaced instead of merging -// with the new data -// TODO: Remove replaceCacheData with a better logic -func findMissingTimeRanges(start, end, step int64, seriesList []*v3.Series, fluxInterval time.Duration) (misses []missInterval, replaceCacheData bool) { - replaceCacheData = false - var cachedStart, cachedEnd int64 - for idx := range seriesList { - series := seriesList[idx] - for pointIdx := range series.Points { - point := series.Points[pointIdx] - if cachedStart == 0 || point.Timestamp < cachedStart { - cachedStart = point.Timestamp - } - if cachedEnd == 0 || point.Timestamp > cachedEnd { - cachedEnd = point.Timestamp - } - } - } - - // time.Now is used because here we are considering the case where data might not - // be fully ingested for last (fluxInterval) minutes - endMillis := time.Now().UnixMilli() - adjustStep := int64(math.Min(float64(step), 60)) - roundedMillis := endMillis - (endMillis % (adjustStep * 1000)) - - // Exclude the flux interval from the cached end time - cachedEnd = int64( - math.Min( - float64(cachedEnd), - float64(roundedMillis-fluxInterval.Milliseconds()), - ), - ) - - // There are five cases to consider - // 1. Cached time range is a subset of the requested time range - // 2. Cached time range is a superset of the requested time range - // 3. Cached time range is a left overlap of the requested time range - // 4. Cached time range is a right overlap of the requested time range - // 5. 
Cached time range is a disjoint of the requested time range - if cachedStart >= start && cachedEnd <= end { - // Case 1: Cached time range is a subset of the requested time range - // Add misses for the left and right sides of the cached time range - misses = append(misses, missInterval{start: start, end: cachedStart - 1}) - misses = append(misses, missInterval{start: cachedEnd + 1, end: end}) - } else if cachedStart <= start && cachedEnd >= end { - // Case 2: Cached time range is a superset of the requested time range - // No misses - } else if cachedStart <= start && cachedEnd >= start { - // Case 3: Cached time range is a left overlap of the requested time range - // Add a miss for the left side of the cached time range - misses = append(misses, missInterval{start: cachedEnd + 1, end: end}) - } else if cachedStart <= end && cachedEnd >= end { - // Case 4: Cached time range is a right overlap of the requested time range - // Add a miss for the right side of the cached time range - misses = append(misses, missInterval{start: start, end: cachedStart - 1}) - } else { - // Case 5: Cached time range is a disjoint of the requested time range - // Add a miss for the entire requested time range - misses = append(misses, missInterval{start: start, end: end}) - replaceCacheData = true - } - - // remove the struts with start > end - var validMisses []missInterval - for idx := range misses { - miss := misses[idx] - if miss.start < miss.end { - validMisses = append(validMisses, miss) - } - } - return validMisses, replaceCacheData -} - -// findMissingTimeRanges finds the missing time ranges in the cached data -// and returns them as a list of misses -func (q *querier) findMissingTimeRanges(start, end, step int64, cachedData []byte) (misses []missInterval, replaceCachedData bool) { - var cachedSeriesList []*v3.Series - if err := json.Unmarshal(cachedData, &cachedSeriesList); err != nil { - // In case of error, we return the entire range as a miss - return []missInterval{{start: start, end: end}}, true - } - return findMissingTimeRanges(start, end, step, cachedSeriesList, q.fluxInterval) -} - -func labelsToString(labels map[string]string) string { - type label struct { - Key string - Value string - } - var labelsList []label - for k, v := range labels { - labelsList = append(labelsList, label{Key: k, Value: v}) - } - sort.Slice(labelsList, func(i, j int) bool { - return labelsList[i].Key < labelsList[j].Key - }) - labelKVs := make([]string, len(labelsList)) - for idx := range labelsList { - labelKVs[idx] = labelsList[idx].Key + "=" + labelsList[idx].Value - } - return fmt.Sprintf("{%s}", strings.Join(labelKVs, ",")) -} - -func filterCachedPoints(cachedSeries []*v3.Series, start, end int64) { - for _, c := range cachedSeries { - points := []v3.Point{} - for _, p := range c.Points { - if (p.Timestamp < start || p.Timestamp > end) && p.Timestamp != 0 { - continue - } - points = append(points, p) - } - c.Points = points - } -} - -func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series { - // Merge the missed series with the cached series by timestamp - mergedSeries := make([]*v3.Series, 0) - seriesesByLabels := make(map[string]*v3.Series) - for idx := range cachedSeries { - series := cachedSeries[idx] - seriesesByLabels[labelsToString(series.Labels)] = series - } - - for idx := range missedSeries { - series := missedSeries[idx] - if _, ok := seriesesByLabels[labelsToString(series.Labels)]; !ok { - seriesesByLabels[labelsToString(series.Labels)] = series - continue - } - 
seriesesByLabels[labelsToString(series.Labels)].Points = append(seriesesByLabels[labelsToString(series.Labels)].Points, series.Points...) - } - // Sort the points in each series by timestamp - for idx := range seriesesByLabels { - series := seriesesByLabels[idx] - series.SortPoints() - series.RemoveDuplicatePoints() - mergedSeries = append(mergedSeries, series) - } - return mergedSeries -} - func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { cacheKeys := q.keyGenerator.GenerateKeys(params) @@ -363,51 +211,34 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam go func(queryName string, promQuery *v3.PromQuery) { defer wg.Done() cacheKey, ok := cacheKeys[queryName] - var cachedData []byte - // Ensure NoCache is not set and cache is not nil - if !params.NoCache && q.cache != nil && ok { - data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) - if err == nil { - cachedData = data - } + + if !ok || params.NoCache { + zap.L().Info("skipping cache for metrics prom query", zap.String("queryName", queryName), zap.Int64("start", params.Start), zap.Int64("end", params.End), zap.Int64("step", params.Step), zap.Bool("noCache", params.NoCache), zap.String("cacheKey", cacheKeys[queryName])) + query := metricsV3.BuildPromQuery(promQuery, params.Step, params.Start, params.End) + series, err := q.execPromQuery(ctx, query) + channelResults <- channelResult{Err: err, Name: queryName, Query: query.Query, Series: series} + return } - misses, replaceCachedData := q.findMissingTimeRanges(params.Start, params.End, params.Step, cachedData) - missedSeries := make([]*v3.Series, 0) - cachedSeries := make([]*v3.Series, 0) + misses := q.queryCache.FindMissingTimeRanges(params.Start, params.End, params.Step, cacheKey) + zap.L().Info("cache misses for metrics prom query", zap.Any("misses", misses)) + missedSeries := make([]querycache.CachedSeriesData, 0) for _, miss := range misses { - query := metricsV3.BuildPromQuery(promQuery, params.Step, miss.start, miss.end) + query := metricsV3.BuildPromQuery(promQuery, params.Step, miss.Start, miss.End) series, err := q.execPromQuery(ctx, query) if err != nil { channelResults <- channelResult{Err: err, Name: queryName, Query: query.Query, Series: nil} return } - missedSeries = append(missedSeries, series...) 
- } - if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { - // ideally we should not be getting an error here - zap.L().Error("error unmarshalling cached data", zap.Error(err)) + missedSeries = append(missedSeries, querycache.CachedSeriesData{ + Data: series, + Start: miss.Start, + End: miss.End, + }) } - mergedSeries := mergeSerieses(cachedSeries, missedSeries) - if replaceCachedData { - mergedSeries = missedSeries - } - - channelResults <- channelResult{Err: nil, Name: queryName, Query: promQuery.Query, Series: mergedSeries} + mergedSeries := q.queryCache.MergeWithCachedSeriesData(cacheKey, missedSeries) + resultSeries := common.GetSeriesFromCachedData(mergedSeries, params.Start, params.End) + channelResults <- channelResult{Err: nil, Name: queryName, Query: promQuery.Query, Series: resultSeries} - // Cache the seriesList for future queries - if len(missedSeries) > 0 && !params.NoCache && q.cache != nil && ok { - mergedSeriesData, err := json.Marshal(mergedSeries) - if err != nil { - zap.L().Error("error marshalling merged series", zap.Error(err)) - return - } - err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour) - if err != nil { - zap.L().Error("error storing merged series", zap.Error(err)) - return - } - } }(queryName, promQuery) } wg.Wait() diff --git a/pkg/query-service/app/querier/querier_test.go b/pkg/query-service/app/querier/querier_test.go index a4814d0c0a..c30841546e 100644 --- a/pkg/query-service/app/querier/querier_test.go +++ b/pkg/query-service/app/querier/querier_test.go @@ -2,7 +2,9 @@ package querier import ( "context" + "encoding/json" "fmt" + "math" "strings" "testing" "time" @@ -11,8 +13,33 @@ import ( tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" "go.signoz.io/signoz/pkg/query-service/cache/inmemory" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/querycache" ) +func minTimestamp(series []*v3.Series) int64 { + min := int64(math.MaxInt64) + for _, series := range series { + for _, point := range series.Points { + if point.Timestamp < min { + min = point.Timestamp + } + } + } + return min +} + +func maxTimestamp(series []*v3.Series) int64 { + max := int64(math.MinInt64) + for _, series := range series { + for _, point := range series.Points { + if point.Timestamp > max { + max = point.Timestamp + } + } + } + return max +} + func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { // There are five scenarios: // 1. 
Cached time range is a subset of the requested time range @@ -26,7 +53,7 @@ func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { requestedEnd int64 // in milliseconds requestedStep int64 // in seconds cachedSeries []*v3.Series - expectedMiss []missInterval + expectedMiss []querycache.MissInterval replaceCachedData bool }{ { @@ -51,14 +78,14 @@ func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { }, }, }, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722, - end: 1675115596722 + 60*60*1000 - 1, + Start: 1675115596722, + End: 1675115596722 + 60*60*1000, }, { - start: 1675115596722 + 120*60*1000 + 1, - end: 1675115596722 + 180*60*1000, + Start: 1675115596722 + 120*60*1000, + End: 1675115596722 + 180*60*1000, }, }, }, @@ -92,7 +119,7 @@ func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { }, }, }, - expectedMiss: []missInterval{}, + expectedMiss: []querycache.MissInterval{}, }, { name: "cached time range is a left overlap of the requested time range", @@ -120,10 +147,10 @@ func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { }, }, }, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722 + 120*60*1000 + 1, - end: 1675115596722 + 180*60*1000, + Start: 1675115596722 + 120*60*1000, + End: 1675115596722 + 180*60*1000, }, }, }, @@ -153,10 +180,10 @@ func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { }, }, }, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722, - end: 1675115596722 + 60*60*1000 - 1, + Start: 1675115596722, + End: 1675115596722 + 60*60*1000, }, }, }, @@ -186,31 +213,48 @@ func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { }, }, }, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722, - end: 1675115596722 + 180*60*1000, + Start: 1675115596722, + End: 1675115596722 + 180*60*1000, }, }, replaceCachedData: true, }, } - for _, tc := range testCases { + c := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute}) + + qc := querycache.NewQueryCache(querycache.WithCache(c)) + + for idx, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - misses, replaceCachedData := findMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.requestedStep, tc.cachedSeries, 0*time.Minute) + cacheKey := fmt.Sprintf("test-cache-key-%d", idx) + cachedData := &querycache.CachedSeriesData{ + Start: minTimestamp(tc.cachedSeries), + End: maxTimestamp(tc.cachedSeries), + Data: tc.cachedSeries, + } + jsonData, err := json.Marshal([]*querycache.CachedSeriesData{cachedData}) + if err != nil { + t.Errorf("error marshalling cached data: %v", err) + } + err = c.Store(cacheKey, jsonData, 5*time.Minute) + if err != nil { + t.Errorf("error storing cached data: %v", err) + } + + misses := qc.FindMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.requestedStep, cacheKey) if len(misses) != len(tc.expectedMiss) { t.Errorf("expected %d misses, got %d", len(tc.expectedMiss), len(misses)) } - if replaceCachedData != tc.replaceCachedData { - t.Errorf("expected replaceCachedData %t, got %t", tc.replaceCachedData, replaceCachedData) - } + for i, miss := range misses { - if miss.start != tc.expectedMiss[i].start { - t.Errorf("expected start %d, got %d", tc.expectedMiss[i].start, miss.start) + if miss.Start != tc.expectedMiss[i].Start { + t.Errorf("expected start %d, got %d", tc.expectedMiss[i].Start, miss.Start) } - if miss.end != tc.expectedMiss[i].end { - 
t.Errorf("expected end %d, got %d", tc.expectedMiss[i].end, miss.end) + if miss.End != tc.expectedMiss[i].End { + t.Errorf("expected end %d, got %d", tc.expectedMiss[i].End, miss.End) } } }) @@ -226,7 +270,7 @@ func TestFindMissingTimeRangesWithFluxInterval(t *testing.T) { requestedStep int64 cachedSeries []*v3.Series fluxInterval time.Duration - expectedMiss []missInterval + expectedMiss []querycache.MissInterval }{ { name: "cached time range is a subset of the requested time range", @@ -251,14 +295,14 @@ func TestFindMissingTimeRangesWithFluxInterval(t *testing.T) { }, }, fluxInterval: 5 * time.Minute, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722, - end: 1675115596722 + 60*60*1000 - 1, + Start: 1675115596722, + End: 1675115596722 + 60*60*1000, }, { - start: 1675115596722 + 120*60*1000 + 1, - end: 1675115596722 + 180*60*1000, + Start: 1675115596722 + 120*60*1000, + End: 1675115596722 + 180*60*1000, }, }, }, @@ -293,7 +337,7 @@ func TestFindMissingTimeRangesWithFluxInterval(t *testing.T) { }, }, fluxInterval: 5 * time.Minute, - expectedMiss: []missInterval{}, + expectedMiss: []querycache.MissInterval{}, }, { name: "cache time range is a left overlap of the requested time range", @@ -322,10 +366,10 @@ func TestFindMissingTimeRangesWithFluxInterval(t *testing.T) { }, }, fluxInterval: 5 * time.Minute, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722 + 120*60*1000 + 1, - end: 1675115596722 + 180*60*1000, + Start: 1675115596722 + 120*60*1000, + End: 1675115596722 + 180*60*1000, }, }, }, @@ -356,10 +400,10 @@ func TestFindMissingTimeRangesWithFluxInterval(t *testing.T) { }, }, fluxInterval: 5 * time.Minute, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722, - end: 1675115596722 + 60*60*1000 - 1, + Start: 1675115596722, + End: 1675115596722 + 60*60*1000, }, }, }, @@ -390,27 +434,45 @@ func TestFindMissingTimeRangesWithFluxInterval(t *testing.T) { }, }, fluxInterval: 5 * time.Minute, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722, - end: 1675115596722 + 180*60*1000, + Start: 1675115596722, + End: 1675115596722 + 180*60*1000, }, }, }, } - for _, tc := range testCases { + c := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute}) + + qc := querycache.NewQueryCache(querycache.WithCache(c)) + + for idx, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - misses, _ := findMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.requestedStep, tc.cachedSeries, tc.fluxInterval) + cacheKey := fmt.Sprintf("test-cache-key-%d", idx) + cachedData := &querycache.CachedSeriesData{ + Start: minTimestamp(tc.cachedSeries), + End: maxTimestamp(tc.cachedSeries), + Data: tc.cachedSeries, + } + jsonData, err := json.Marshal([]*querycache.CachedSeriesData{cachedData}) + if err != nil { + t.Errorf("error marshalling cached data: %v", err) + } + err = c.Store(cacheKey, jsonData, 5*time.Minute) + if err != nil { + t.Errorf("error storing cached data: %v", err) + } + misses := qc.FindMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.requestedStep, cacheKey) if len(misses) != len(tc.expectedMiss) { t.Errorf("expected %d misses, got %d", len(tc.expectedMiss), len(misses)) } for i, miss := range misses { - if miss.start != tc.expectedMiss[i].start { - t.Errorf("expected start %d, got %d", tc.expectedMiss[i].start, miss.start) + if miss.Start != tc.expectedMiss[i].Start 
{ + t.Errorf("expected start %d, got %d", tc.expectedMiss[i].Start, miss.Start) } - if miss.end != tc.expectedMiss[i].end { - t.Errorf("expected end %d, got %d", tc.expectedMiss[i].end, miss.end) + if miss.End != tc.expectedMiss[i].End { + t.Errorf("expected end %d, got %d", tc.expectedMiss[i].End, miss.End) } } }) @@ -1022,18 +1084,18 @@ func TestQueryRangeValueTypePromQL(t *testing.T) { expectedQueryAndTimeRanges := []struct { query string - ranges []missInterval + ranges []querycache.MissInterval }{ { query: "signoz_calls_total", - ranges: []missInterval{ - {start: 1675115596722, end: 1675115596722 + 120*60*1000}, + ranges: []querycache.MissInterval{ + {Start: 1675115596722, End: 1675115596722 + 120*60*1000}, }, }, { query: "signoz_latency_bucket", - ranges: []missInterval{ - {start: 1675115596722 + 60*60*1000, end: 1675115596722 + 180*60*1000}, + ranges: []querycache.MissInterval{ + {Start: 1675115596722 + 60*60*1000, End: 1675115596722 + 180*60*1000}, }, }, } @@ -1054,10 +1116,10 @@ func TestQueryRangeValueTypePromQL(t *testing.T) { if len(q.TimeRanges()[i]) != 2 { t.Errorf("expected time ranges to be %v, got %v", expectedQueryAndTimeRanges[i].ranges, q.TimeRanges()[i]) } - if q.TimeRanges()[i][0] != int(expectedQueryAndTimeRanges[i].ranges[0].start) { + if q.TimeRanges()[i][0] != int(expectedQueryAndTimeRanges[i].ranges[0].Start) { t.Errorf("expected time ranges to be %v, got %v", expectedQueryAndTimeRanges[i].ranges, q.TimeRanges()[i]) } - if q.TimeRanges()[i][1] != int(expectedQueryAndTimeRanges[i].ranges[0].end) { + if q.TimeRanges()[i][1] != int(expectedQueryAndTimeRanges[i].ranges[0].End) { t.Errorf("expected time ranges to be %v, got %v", expectedQueryAndTimeRanges[i].ranges, q.TimeRanges()[i]) } } diff --git a/pkg/query-service/app/querier/v2/helper.go b/pkg/query-service/app/querier/v2/helper.go index f1dd33c4e6..bb41bc8c36 100644 --- a/pkg/query-service/app/querier/v2/helper.go +++ b/pkg/query-service/app/querier/v2/helper.go @@ -2,20 +2,19 @@ package v2 import ( "context" - "encoding/json" "fmt" "strings" "sync" - "time" logsV3 "go.signoz.io/signoz/pkg/query-service/app/logs/v3" logsV4 "go.signoz.io/signoz/pkg/query-service/app/logs/v4" metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3" metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4" tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" - "go.signoz.io/signoz/pkg/query-service/cache/status" + "go.signoz.io/signoz/pkg/query-service/common" "go.signoz.io/signoz/pkg/query-service/constants" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/querycache" "go.uber.org/zap" ) @@ -108,7 +107,8 @@ func (q *querier) runBuilderQuery( if builderQuery.DataSource == v3.DataSourceLogs { var query string var err error - if _, ok := cacheKeys[queryName]; !ok { + if _, ok := cacheKeys[queryName]; !ok || params.NoCache { + zap.L().Info("skipping cache for logs query", zap.String("queryName", queryName), zap.Int64("start", params.Start), zap.Int64("end", params.End), zap.Int64("step", params.Step), zap.Bool("noCache", params.NoCache), zap.String("cacheKey", cacheKeys[queryName])) query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, start, end, builderQuery, params, preferRPM) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil} @@ -118,21 +118,11 @@ func (q *querier) runBuilderQuery( ch <- channelResult{Err: err, Name: queryName, Query: query, Series: series} return } - cacheKey := cacheKeys[queryName] - var cachedData 
[]byte - if !params.NoCache && q.cache != nil { - var retrieveStatus status.RetrieveStatus - data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) - if err == nil { - cachedData = data - } - } - misses, replaceCachedData := q.findMissingTimeRanges(start, end, builderQuery.StepInterval, cachedData) - missedSeries := make([]*v3.Series, 0) - cachedSeries := make([]*v3.Series, 0) + misses := q.queryCache.FindMissingTimeRanges(start, end, builderQuery.StepInterval, cacheKeys[queryName]) + zap.L().Info("cache misses for logs query", zap.Any("misses", misses)) + missedSeries := make([]querycache.CachedSeriesData, 0) for _, miss := range misses { - query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.start, miss.end, builderQuery, params, preferRPM) + query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.Start, miss.End, builderQuery, params, preferRPM) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil} return @@ -147,43 +137,20 @@ func (q *querier) runBuilderQuery( } return } - missedSeries = append(missedSeries, series...) - } - if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { - zap.L().Error("error unmarshalling cached data", zap.Error(err)) - } - mergedSeries := mergeSerieses(cachedSeries, missedSeries) - if replaceCachedData { - mergedSeries = missedSeries - } - var mergedSeriesData []byte - var marshallingErr error - missedSeriesLen := len(missedSeries) - if missedSeriesLen > 0 && !params.NoCache && q.cache != nil { - // caching the data - mergedSeriesData, marshallingErr = json.Marshal(mergedSeries) - if marshallingErr != nil { - zap.L().Error("error marshalling merged series", zap.Error(marshallingErr)) - } + missedSeries = append(missedSeries, querycache.CachedSeriesData{ + Data: series, + Start: miss.Start, + End: miss.End, + }) } + mergedSeries := q.queryCache.MergeWithCachedSeriesData(cacheKeys[queryName], missedSeries) - // response doesn't need everything - filterCachedPoints(mergedSeries, start, end) + resultSeries := common.GetSeriesFromCachedData(mergedSeries, start, end) ch <- channelResult{ Err: nil, Name: queryName, - Series: mergedSeries, - } - - // Cache the seriesList for future queries - if missedSeriesLen > 0 && !params.NoCache && q.cache != nil && marshallingErr == nil { - // caching the data - err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour) - if err != nil { - zap.L().Error("error storing merged series", zap.Error(err)) - return - } + Series: resultSeries, } return @@ -240,7 +207,8 @@ func (q *querier) runBuilderQuery( // What is happening here? // We are only caching the graph panel queries. A non-existant cache key means that the query is not cached. // If the query is not cached, we execute the query and return the result without caching it. 
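
Worth noting what now sits behind each cache key: a JSON-marshalled array of CachedSeriesData buckets (start, end, data) instead of a bare series list. The unit tests above seed the cache exactly this way; the helper below captures that pattern. seedCache is an invented name, and the Store signature is the one used by the removed code and the tests.

    package example

    import (
        "encoding/json"
        "time"

        "go.signoz.io/signoz/pkg/query-service/cache"
        v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
        "go.signoz.io/signoz/pkg/query-service/querycache"
    )

    // seedCache writes a single bucket covering [start, end] under cacheKey,
    // in the same format the query cache reads back and merges.
    func seedCache(c cache.Cache, cacheKey string, start, end int64, series []*v3.Series) error {
        bucket := &querycache.CachedSeriesData{
            Start: start,
            End:   end,
            Data:  series,
        }
        jsonData, err := json.Marshal([]*querycache.CachedSeriesData{bucket})
        if err != nil {
            return err
        }
        return c.Store(cacheKey, jsonData, 5*time.Minute)
    }
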
- if _, ok := cacheKeys[queryName]; !ok { + if _, ok := cacheKeys[queryName]; !ok || params.NoCache { + zap.L().Info("skipping cache for metrics query", zap.String("queryName", queryName), zap.Int64("start", params.Start), zap.Int64("end", params.End), zap.Int64("step", params.Step), zap.Bool("noCache", params.NoCache), zap.String("cacheKey", cacheKeys[queryName])) query, err := metricsV4.PrepareMetricQuery(start, end, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, metricsV3.Options{PreferRPM: preferRPM}) if err != nil { ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil} @@ -251,23 +219,13 @@ func (q *querier) runBuilderQuery( return } - cacheKey := cacheKeys[queryName] - var cachedData []byte - if !params.NoCache && q.cache != nil { - var retrieveStatus status.RetrieveStatus - data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) - if err == nil { - cachedData = data - } - } - misses, replaceCachedData := q.findMissingTimeRanges(start, end, builderQuery.StepInterval, cachedData) - missedSeries := make([]*v3.Series, 0) - cachedSeries := make([]*v3.Series, 0) + misses := q.queryCache.FindMissingTimeRanges(start, end, builderQuery.StepInterval, cacheKeys[queryName]) + zap.L().Info("cache misses for metrics query", zap.Any("misses", misses)) + missedSeries := make([]querycache.CachedSeriesData, 0) for _, miss := range misses { query, err := metricsV4.PrepareMetricQuery( - miss.start, - miss.end, + miss.Start, + miss.End, params.CompositeQuery.QueryType, params.CompositeQuery.PanelType, builderQuery, @@ -292,41 +250,19 @@ func (q *querier) runBuilderQuery( } return } - missedSeries = append(missedSeries, series...) 
- } - if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { - zap.L().Error("error unmarshalling cached data", zap.Error(err)) - } - mergedSeries := mergeSerieses(cachedSeries, missedSeries) - if replaceCachedData { - mergedSeries = missedSeries + missedSeries = append(missedSeries, querycache.CachedSeriesData{ + Data: series, + Start: miss.Start, + End: miss.End, + }) } + mergedSeries := q.queryCache.MergeWithCachedSeriesData(cacheKeys[queryName], missedSeries) - var mergedSeriesData []byte - var marshallingErr error - missedSeriesLen := len(missedSeries) - if missedSeriesLen > 0 && !params.NoCache && q.cache != nil { - // caching the data - mergedSeriesData, marshallingErr = json.Marshal(mergedSeries) - if marshallingErr != nil { - zap.S().Error("error marshalling merged series", zap.Error(marshallingErr)) - } - } - - // response doesn't need everything - filterCachedPoints(mergedSeries, start, end) + resultSeries := common.GetSeriesFromCachedData(mergedSeries, start, end) ch <- channelResult{ Err: nil, Name: queryName, - Series: mergedSeries, - } - // Cache the seriesList for future queries - if missedSeriesLen > 0 && !params.NoCache && q.cache != nil && marshallingErr == nil { - err := q.cache.Store(cacheKey, mergedSeriesData, time.Hour) - if err != nil { - zap.L().Error("error storing merged series", zap.Error(err)) - return - } + Series: resultSeries, } } diff --git a/pkg/query-service/app/querier/v2/querier.go b/pkg/query-service/app/querier/v2/querier.go index b71a8cc0cc..f8316d6f6c 100644 --- a/pkg/query-service/app/querier/v2/querier.go +++ b/pkg/query-service/app/querier/v2/querier.go @@ -2,11 +2,7 @@ package v2 import ( "context" - "encoding/json" "fmt" - "math" - "sort" - "strings" "sync" "time" @@ -15,7 +11,9 @@ import ( metricsV4 "go.signoz.io/signoz/pkg/query-service/app/metrics/v4" "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" + "go.signoz.io/signoz/pkg/query-service/common" chErrors "go.signoz.io/signoz/pkg/query-service/errors" + "go.signoz.io/signoz/pkg/query-service/querycache" "go.signoz.io/signoz/pkg/query-service/utils" "go.signoz.io/signoz/pkg/query-service/cache" @@ -34,14 +32,11 @@ type channelResult struct { Query string } -type missInterval struct { - start, end int64 // in milliseconds -} - type querier struct { cache cache.Cache reader interfaces.Reader keyGenerator cache.KeyGenerator + queryCache interfaces.QueryCache fluxInterval time.Duration @@ -79,8 +74,11 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier { logsQueryBuilder = logsV4.PrepareLogsQuery } + qc := querycache.NewQueryCache(querycache.WithCache(opts.Cache), querycache.WithFluxInterval(opts.FluxInterval)) + return &querier{ cache: opts.Cache, + queryCache: qc, reader: opts.Reader, keyGenerator: opts.KeyGenerator, fluxInterval: opts.FluxInterval, @@ -157,167 +155,6 @@ func (q *querier) execPromQuery(ctx context.Context, params *model.QueryRangePar return seriesList, nil } -// findMissingTimeRanges finds the missing time ranges in the seriesList -// and returns a list of miss structs, It takes the fluxInterval into -// account to find the missing time ranges. -// -// The [End - fluxInterval, End] is always added to the list of misses, because -// the data might still be in flux and not yet available in the database. 
-// -// replaceCacheData is used to indicate if the cache data should be replaced instead of merging -// with the new data -// TODO: Remove replaceCacheData with a better logic -func findMissingTimeRanges(start, end, step int64, seriesList []*v3.Series, fluxInterval time.Duration) (misses []missInterval, replaceCacheData bool) { - replaceCacheData = false - var cachedStart, cachedEnd int64 - for idx := range seriesList { - series := seriesList[idx] - for pointIdx := range series.Points { - point := series.Points[pointIdx] - if cachedStart == 0 || point.Timestamp < cachedStart { - cachedStart = point.Timestamp - } - if cachedEnd == 0 || point.Timestamp > cachedEnd { - cachedEnd = point.Timestamp - } - } - } - - // time.Now is used because here we are considering the case where data might not - // be fully ingested for last (fluxInterval) minutes - endMillis := time.Now().UnixMilli() - adjustStep := int64(math.Min(float64(step), 60)) - roundedMillis := endMillis - (endMillis % (adjustStep * 1000)) - - // Exclude the flux interval from the cached end time - cachedEnd = int64( - math.Min( - float64(cachedEnd), - float64(roundedMillis-fluxInterval.Milliseconds()), - ), - ) - - // There are five cases to consider - // 1. Cached time range is a subset of the requested time range - // 2. Cached time range is a superset of the requested time range - // 3. Cached time range is a left overlap of the requested time range - // 4. Cached time range is a right overlap of the requested time range - // 5. Cached time range is a disjoint of the requested time range - if cachedStart >= start && cachedEnd <= end { - // Case 1: Cached time range is a subset of the requested time range - // Add misses for the left and right sides of the cached time range - misses = append(misses, missInterval{start: start, end: cachedStart - 1}) - misses = append(misses, missInterval{start: cachedEnd + 1, end: end}) - } else if cachedStart <= start && cachedEnd >= end { - // Case 2: Cached time range is a superset of the requested time range - // No misses - } else if cachedStart <= start && cachedEnd >= start { - // Case 3: Cached time range is a left overlap of the requested time range - // Add a miss for the left side of the cached time range - misses = append(misses, missInterval{start: cachedEnd + 1, end: end}) - } else if cachedStart <= end && cachedEnd >= end { - // Case 4: Cached time range is a right overlap of the requested time range - // Add a miss for the right side of the cached time range - misses = append(misses, missInterval{start: start, end: cachedStart - 1}) - } else { - // Case 5: Cached time range is a disjoint of the requested time range - // Add a miss for the entire requested time range - misses = append(misses, missInterval{start: start, end: end}) - replaceCacheData = true - } - - // remove the struts with start > end - var validMisses []missInterval - for idx := range misses { - miss := misses[idx] - if miss.start < miss.end { - validMisses = append(validMisses, miss) - } - } - return validMisses, replaceCacheData -} - -// findMissingTimeRanges finds the missing time ranges in the cached data -// and returns them as a list of misses -func (q *querier) findMissingTimeRanges(start, end, step int64, cachedData []byte) (misses []missInterval, replaceCachedData bool) { - var cachedSeriesList []*v3.Series - if err := json.Unmarshal(cachedData, &cachedSeriesList); err != nil { - // In case of error, we return the entire range as a miss - return []missInterval{{start: start, end: end}}, true - } - return 
findMissingTimeRanges(start, end, step, cachedSeriesList, q.fluxInterval) -} - -// labelsToString converts the labels map to a string -// sorted by key so that the string is consistent -// across different runs -func labelsToString(labels map[string]string) string { - type label struct { - Key string - Value string - } - var labelsList []label - for k, v := range labels { - labelsList = append(labelsList, label{Key: k, Value: v}) - } - sort.Slice(labelsList, func(i, j int) bool { - return labelsList[i].Key < labelsList[j].Key - }) - labelKVs := make([]string, len(labelsList)) - for idx := range labelsList { - labelKVs[idx] = labelsList[idx].Key + "=" + labelsList[idx].Value - } - return fmt.Sprintf("{%s}", strings.Join(labelKVs, ",")) -} - -// filterCachedPoints filters the points in the series list -// that are outside the start and end time range -// and returns the filtered series list -// TODO(srikanthccv): is this really needed? -func filterCachedPoints(cachedSeries []*v3.Series, start, end int64) { - for _, c := range cachedSeries { - points := []v3.Point{} - for _, p := range c.Points { - if (p.Timestamp < start || p.Timestamp > end) && p.Timestamp != 0 { - continue - } - points = append(points, p) - } - c.Points = points - } -} - -// mergeSerieses merges the cached series and the missed series -// and returns the merged series list -func mergeSerieses(cachedSeries, missedSeries []*v3.Series) []*v3.Series { - // Merge the missed series with the cached series by timestamp - mergedSeries := make([]*v3.Series, 0) - seriesesByLabels := make(map[string]*v3.Series) - for idx := range cachedSeries { - series := cachedSeries[idx] - seriesesByLabels[labelsToString(series.Labels)] = series - } - - for idx := range missedSeries { - series := missedSeries[idx] - if _, ok := seriesesByLabels[labelsToString(series.Labels)]; !ok { - seriesesByLabels[labelsToString(series.Labels)] = series - continue - } - seriesesByLabels[labelsToString(series.Labels)].Points = append(seriesesByLabels[labelsToString(series.Labels)].Points, series.Points...) 
- } - - // Sort the points in each series by timestamp - // and remove duplicate points - for idx := range seriesesByLabels { - series := seriesesByLabels[idx] - series.SortPoints() - series.RemoveDuplicatePoints() - mergedSeries = append(mergedSeries, series) - } - return mergedSeries -} - func (q *querier) runBuilderQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { cacheKeys := q.keyGenerator.GenerateKeys(params) @@ -372,50 +209,33 @@ func (q *querier) runPromQueries(ctx context.Context, params *v3.QueryRangeParam go func(queryName string, promQuery *v3.PromQuery) { defer wg.Done() cacheKey, ok := cacheKeys[queryName] - var cachedData []byte - // Ensure NoCache is not set and cache is not nil - if !params.NoCache && q.cache != nil && ok { - data, retrieveStatus, err := q.cache.Retrieve(cacheKey, true) - zap.L().Info("cache retrieve status", zap.String("status", retrieveStatus.String())) - if err == nil { - cachedData = data - } + + if !ok || params.NoCache { + zap.L().Info("skipping cache for metrics prom query", zap.String("queryName", queryName), zap.Int64("start", params.Start), zap.Int64("end", params.End), zap.Int64("step", params.Step), zap.Bool("noCache", params.NoCache), zap.String("cacheKey", cacheKeys[queryName])) + query := metricsV4.BuildPromQuery(promQuery, params.Step, params.Start, params.End) + series, err := q.execPromQuery(ctx, query) + channelResults <- channelResult{Err: err, Name: queryName, Query: query.Query, Series: series} + return } - misses, replaceCachedData := q.findMissingTimeRanges(params.Start, params.End, params.Step, cachedData) - missedSeries := make([]*v3.Series, 0) - cachedSeries := make([]*v3.Series, 0) + misses := q.queryCache.FindMissingTimeRanges(params.Start, params.End, params.Step, cacheKey) + zap.L().Info("cache misses for metrics prom query", zap.Any("misses", misses)) + missedSeries := make([]querycache.CachedSeriesData, 0) for _, miss := range misses { - query := metricsV4.BuildPromQuery(promQuery, params.Step, miss.start, miss.end) + query := metricsV4.BuildPromQuery(promQuery, params.Step, miss.Start, miss.End) series, err := q.execPromQuery(ctx, query) if err != nil { channelResults <- channelResult{Err: err, Name: queryName, Query: query.Query, Series: nil} return } - missedSeries = append(missedSeries, series...) 
- } - if err := json.Unmarshal(cachedData, &cachedSeries); err != nil && cachedData != nil { - // ideally we should not be getting an error here - zap.L().Error("error unmarshalling cached data", zap.Error(err)) - } - mergedSeries := mergeSerieses(cachedSeries, missedSeries) - if replaceCachedData { - mergedSeries = missedSeries - } - channelResults <- channelResult{Err: nil, Name: queryName, Query: promQuery.Query, Series: mergedSeries} - - // Cache the seriesList for future queries - if len(missedSeries) > 0 && !params.NoCache && q.cache != nil && ok { - mergedSeriesData, err := json.Marshal(mergedSeries) - if err != nil { - zap.L().Error("error marshalling merged series", zap.Error(err)) - return - } - err = q.cache.Store(cacheKey, mergedSeriesData, time.Hour) - if err != nil { - zap.L().Error("error storing merged series", zap.Error(err)) - return - } + missedSeries = append(missedSeries, querycache.CachedSeriesData{ + Data: series, + Start: miss.Start, + End: miss.End, + }) } + mergedSeries := q.queryCache.MergeWithCachedSeriesData(cacheKey, missedSeries) + resultSeries := common.GetSeriesFromCachedData(mergedSeries, params.Start, params.End) + channelResults <- channelResult{Err: nil, Name: queryName, Query: promQuery.Query, Series: resultSeries} }(queryName, promQuery) } wg.Wait() diff --git a/pkg/query-service/app/querier/v2/querier_test.go b/pkg/query-service/app/querier/v2/querier_test.go index c65b6ff54a..6dfc921183 100644 --- a/pkg/query-service/app/querier/v2/querier_test.go +++ b/pkg/query-service/app/querier/v2/querier_test.go @@ -2,7 +2,9 @@ package v2 import ( "context" + "encoding/json" "fmt" + "math" "strings" "testing" "time" @@ -11,9 +13,34 @@ import ( tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" "go.signoz.io/signoz/pkg/query-service/cache/inmemory" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/querycache" ) -func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { +func minTimestamp(series []*v3.Series) int64 { + min := int64(math.MaxInt64) + for _, series := range series { + for _, point := range series.Points { + if point.Timestamp < min { + min = point.Timestamp + } + } + } + return min +} + +func maxTimestamp(series []*v3.Series) int64 { + max := int64(math.MinInt64) + for _, series := range series { + for _, point := range series.Points { + if point.Timestamp > max { + max = point.Timestamp + } + } + } + return max +} + +func TestV2FindMissingTimeRangesZeroFreshNess(t *testing.T) { // There are five scenarios: // 1. Cached time range is a subset of the requested time range // 2. 
Cached time range is a superset of the requested time range @@ -26,7 +53,7 @@ func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { requestedEnd int64 // in milliseconds requestedStep int64 // in seconds cachedSeries []*v3.Series - expectedMiss []missInterval + expectedMiss []querycache.MissInterval replaceCachedData bool }{ { @@ -51,14 +78,14 @@ func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { }, }, }, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722, - end: 1675115596722 + 60*60*1000 - 1, + Start: 1675115596722, + End: 1675115596722 + 60*60*1000, }, { - start: 1675115596722 + 120*60*1000 + 1, - end: 1675115596722 + 180*60*1000, + Start: 1675115596722 + 120*60*1000, + End: 1675115596722 + 180*60*1000, }, }, }, @@ -92,7 +119,7 @@ func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { }, }, }, - expectedMiss: []missInterval{}, + expectedMiss: []querycache.MissInterval{}, }, { name: "cached time range is a left overlap of the requested time range", @@ -120,10 +147,10 @@ func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { }, }, }, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722 + 120*60*1000 + 1, - end: 1675115596722 + 180*60*1000, + Start: 1675115596722 + 120*60*1000, + End: 1675115596722 + 180*60*1000, }, }, }, @@ -153,10 +180,10 @@ func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { }, }, }, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722, - end: 1675115596722 + 60*60*1000 - 1, + Start: 1675115596722, + End: 1675115596722 + 60*60*1000, }, }, }, @@ -186,31 +213,48 @@ func TestFindMissingTimeRangesZeroFreshNess(t *testing.T) { }, }, }, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722, - end: 1675115596722 + 180*60*1000, + Start: 1675115596722, + End: 1675115596722 + 180*60*1000, }, }, replaceCachedData: true, }, } - for _, tc := range testCases { + c := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute}) + + qc := querycache.NewQueryCache(querycache.WithCache(c)) + + for idx, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - misses, replaceCachedData := findMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.requestedStep, tc.cachedSeries, 0*time.Minute) + cacheKey := fmt.Sprintf("test-cache-key-%d", idx) + cachedData := &querycache.CachedSeriesData{ + Start: minTimestamp(tc.cachedSeries), + End: maxTimestamp(tc.cachedSeries), + Data: tc.cachedSeries, + } + jsonData, err := json.Marshal([]*querycache.CachedSeriesData{cachedData}) + if err != nil { + t.Errorf("error marshalling cached data: %v", err) + } + err = c.Store(cacheKey, jsonData, 5*time.Minute) + if err != nil { + t.Errorf("error storing cached data: %v", err) + } + + misses := qc.FindMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.requestedStep, cacheKey) if len(misses) != len(tc.expectedMiss) { t.Errorf("expected %d misses, got %d", len(tc.expectedMiss), len(misses)) } - if replaceCachedData != tc.replaceCachedData { - t.Errorf("expected replaceCachedData %t, got %t", tc.replaceCachedData, replaceCachedData) - } + for i, miss := range misses { - if miss.start != tc.expectedMiss[i].start { - t.Errorf("expected start %d, got %d", tc.expectedMiss[i].start, miss.start) + if miss.Start != tc.expectedMiss[i].Start { + t.Errorf("expected start %d, got %d", tc.expectedMiss[i].Start, miss.Start) } - if miss.end != tc.expectedMiss[i].end { 
- t.Errorf("expected end %d, got %d", tc.expectedMiss[i].end, miss.end) + if miss.End != tc.expectedMiss[i].End { + t.Errorf("expected end %d, got %d", tc.expectedMiss[i].End, miss.End) } } }) @@ -226,7 +270,7 @@ func TestV2FindMissingTimeRangesWithFluxInterval(t *testing.T) { requestedStep int64 cachedSeries []*v3.Series fluxInterval time.Duration - expectedMiss []missInterval + expectedMiss []querycache.MissInterval }{ { name: "cached time range is a subset of the requested time range", @@ -251,14 +295,14 @@ func TestV2FindMissingTimeRangesWithFluxInterval(t *testing.T) { }, }, fluxInterval: 5 * time.Minute, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722, - end: 1675115596722 + 60*60*1000 - 1, + Start: 1675115596722, + End: 1675115596722 + 60*60*1000, }, { - start: 1675115596722 + 120*60*1000 + 1, - end: 1675115596722 + 180*60*1000, + Start: 1675115596722 + 120*60*1000, + End: 1675115596722 + 180*60*1000, }, }, }, @@ -293,7 +337,7 @@ func TestV2FindMissingTimeRangesWithFluxInterval(t *testing.T) { }, }, fluxInterval: 5 * time.Minute, - expectedMiss: []missInterval{}, + expectedMiss: []querycache.MissInterval{}, }, { name: "cache time range is a left overlap of the requested time range", @@ -322,10 +366,10 @@ func TestV2FindMissingTimeRangesWithFluxInterval(t *testing.T) { }, }, fluxInterval: 5 * time.Minute, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722 + 120*60*1000 + 1, - end: 1675115596722 + 180*60*1000, + Start: 1675115596722 + 120*60*1000, + End: 1675115596722 + 180*60*1000, }, }, }, @@ -356,10 +400,10 @@ func TestV2FindMissingTimeRangesWithFluxInterval(t *testing.T) { }, }, fluxInterval: 5 * time.Minute, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722, - end: 1675115596722 + 60*60*1000 - 1, + Start: 1675115596722, + End: 1675115596722 + 60*60*1000, }, }, }, @@ -390,27 +434,47 @@ func TestV2FindMissingTimeRangesWithFluxInterval(t *testing.T) { }, }, fluxInterval: 5 * time.Minute, - expectedMiss: []missInterval{ + expectedMiss: []querycache.MissInterval{ { - start: 1675115596722, - end: 1675115596722 + 180*60*1000, + Start: 1675115596722, + End: 1675115596722 + 180*60*1000, }, }, }, } - for _, tc := range testCases { + c := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute}) + + qc := querycache.NewQueryCache(querycache.WithCache(c)) + + for idx, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - misses, _ := findMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.requestedStep, tc.cachedSeries, tc.fluxInterval) + cacheKey := fmt.Sprintf("test-cache-key-%d", idx) + cachedData := &querycache.CachedSeriesData{ + Start: minTimestamp(tc.cachedSeries), + End: maxTimestamp(tc.cachedSeries), + Data: tc.cachedSeries, + } + jsonData, err := json.Marshal([]*querycache.CachedSeriesData{cachedData}) + if err != nil { + t.Errorf("error marshalling cached data: %v", err) + return + } + err = c.Store(cacheKey, jsonData, 5*time.Minute) + if err != nil { + t.Errorf("error storing cached data: %v", err) + return + } + misses := qc.FindMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.requestedStep, cacheKey) if len(misses) != len(tc.expectedMiss) { t.Errorf("expected %d misses, got %d", len(tc.expectedMiss), len(misses)) } for i, miss := range misses { - if miss.start != tc.expectedMiss[i].start { - t.Errorf("expected start %d, got %d", tc.expectedMiss[i].start, miss.start) + if 
miss.Start != tc.expectedMiss[i].Start { + t.Errorf("expected start %d, got %d", tc.expectedMiss[i].Start, miss.Start) } - if miss.end != tc.expectedMiss[i].end { - t.Errorf("expected end %d, got %d", tc.expectedMiss[i].end, miss.end) + if miss.End != tc.expectedMiss[i].End { + t.Errorf("expected end %d, got %d", tc.expectedMiss[i].End, miss.End) } } }) @@ -1074,18 +1138,18 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) { expectedQueryAndTimeRanges := []struct { query string - ranges []missInterval + ranges []querycache.MissInterval }{ { query: "signoz_calls_total", - ranges: []missInterval{ - {start: 1675115596722, end: 1675115596722 + 120*60*1000}, + ranges: []querycache.MissInterval{ + {Start: 1675115596722, End: 1675115596722 + 120*60*1000}, }, }, { query: "signoz_latency_bucket", - ranges: []missInterval{ - {start: 1675115596722 + 60*60*1000, end: 1675115596722 + 180*60*1000}, + ranges: []querycache.MissInterval{ + {Start: 1675115596722 + 60*60*1000, End: 1675115596722 + 180*60*1000}, }, }, } @@ -1106,10 +1170,10 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) { if len(q.TimeRanges()[i]) != 2 { t.Errorf("expected time ranges to be %v, got %v", expectedQueryAndTimeRanges[i].ranges, q.TimeRanges()[i]) } - if q.TimeRanges()[i][0] != int(expectedQueryAndTimeRanges[i].ranges[0].start) { + if q.TimeRanges()[i][0] != int(expectedQueryAndTimeRanges[i].ranges[0].Start) { t.Errorf("expected time ranges to be %v, got %v", expectedQueryAndTimeRanges[i].ranges, q.TimeRanges()[i]) } - if q.TimeRanges()[i][1] != int(expectedQueryAndTimeRanges[i].ranges[0].end) { + if q.TimeRanges()[i][1] != int(expectedQueryAndTimeRanges[i].ranges[0].End) { t.Errorf("expected time ranges to be %v, got %v", expectedQueryAndTimeRanges[i].ranges, q.TimeRanges()[i]) } } diff --git a/pkg/query-service/common/query_range.go b/pkg/query-service/common/query_range.go index c352c7d9f2..d6b62baf27 100644 --- a/pkg/query-service/common/query_range.go +++ b/pkg/query-service/common/query_range.go @@ -6,6 +6,8 @@ import ( "go.signoz.io/signoz/pkg/query-service/constants" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/querycache" + "go.signoz.io/signoz/pkg/query-service/utils/labels" ) func AdjustedMetricTimeRange(start, end, step int64, mq v3.BuilderQuery) (int64, int64) { @@ -70,3 +72,35 @@ func LCMList(nums []int64) int64 { } return result } + +func GetSeriesFromCachedData(data []querycache.CachedSeriesData, start, end int64) []*v3.Series { + series := make(map[uint64]*v3.Series) + + for _, cachedData := range data { + for _, data := range cachedData.Data { + h := labels.FromMap(data.Labels).Hash() + + if _, ok := series[h]; !ok { + series[h] = &v3.Series{ + Labels: data.Labels, + LabelsArray: data.LabelsArray, + Points: make([]v3.Point, 0), + } + } + + for _, point := range data.Points { + if point.Timestamp >= start && point.Timestamp <= end { + series[h].Points = append(series[h].Points, point) + } + } + } + } + + newSeries := make([]*v3.Series, 0, len(series)) + for _, s := range series { + s.SortPoints() + s.RemoveDuplicatePoints() + newSeries = append(newSeries, s) + } + return newSeries +} diff --git a/pkg/query-service/interfaces/interface.go b/pkg/query-service/interfaces/interface.go index db2563edab..8e651e17ea 100644 --- a/pkg/query-service/interfaces/interface.go +++ b/pkg/query-service/interfaces/interface.go @@ -10,6 +10,7 @@ import ( "github.com/prometheus/prometheus/util/stats" "go.signoz.io/signoz/pkg/query-service/model" v3 
"go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/querycache" ) type Reader interface { @@ -121,3 +122,8 @@ type Querier interface { QueriesExecuted() []string TimeRanges() [][]int } + +type QueryCache interface { + FindMissingTimeRanges(start, end int64, step int64, cacheKey string) []querycache.MissInterval + MergeWithCachedSeriesData(cacheKey string, newData []querycache.CachedSeriesData) []querycache.CachedSeriesData +} diff --git a/pkg/query-service/querycache/query_range_cache.go b/pkg/query-service/querycache/query_range_cache.go new file mode 100644 index 0000000000..3b3e3be93c --- /dev/null +++ b/pkg/query-service/querycache/query_range_cache.go @@ -0,0 +1,225 @@ +package querycache + +import ( + "encoding/json" + "math" + "sort" + "time" + + "go.signoz.io/signoz/pkg/query-service/cache" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/utils/labels" + "go.uber.org/zap" +) + +type queryCache struct { + cache cache.Cache + fluxInterval time.Duration +} + +type MissInterval struct { + Start, End int64 // in milliseconds +} + +type CachedSeriesData struct { + Start int64 `json:"start"` + End int64 `json:"end"` + Data []*v3.Series `json:"data"` +} + +type QueryCacheOption func(q *queryCache) + +func NewQueryCache(opts ...QueryCacheOption) *queryCache { + q := &queryCache{} + for _, opt := range opts { + opt(q) + } + return q +} + +func WithCache(cache cache.Cache) QueryCacheOption { + return func(q *queryCache) { + q.cache = cache + } +} + +func WithFluxInterval(fluxInterval time.Duration) QueryCacheOption { + return func(q *queryCache) { + q.fluxInterval = fluxInterval + } +} + +func (q *queryCache) FindMissingTimeRanges(start, end, step int64, cacheKey string) []MissInterval { + if q.cache == nil || cacheKey == "" { + return []MissInterval{{Start: start, End: end}} + } + + cachedSeriesDataList := q.getCachedSeriesData(cacheKey) + + // Sort the cached data by start time + sort.Slice(cachedSeriesDataList, func(i, j int) bool { + return cachedSeriesDataList[i].Start < cachedSeriesDataList[j].Start + }) + + zap.L().Info("Number of non-overlapping cached series data", zap.Int("count", len(cachedSeriesDataList))) + + // Exclude the flux interval from the cached end time + + // Why do we use `time.Now()` here? + // When querying for a range [start, now()) + // we don't want to use the cached data inside the flux interval period + // because the data in the flux interval period might not be fully ingested + // and should not be used for caching. 
+	// This is not an issue if the end time is before now() - fluxInterval
+	endMillis := time.Now().UnixMilli()
+	adjustStep := int64(math.Min(float64(step), 60))
+	roundedMillis := endMillis - (endMillis % (adjustStep * 1000))
+
+	if len(cachedSeriesDataList) > 0 {
+		lastCachedData := cachedSeriesDataList[len(cachedSeriesDataList)-1]
+		lastCachedData.End = int64(
+			math.Min(
+				float64(lastCachedData.End),
+				float64(roundedMillis-q.fluxInterval.Milliseconds()),
+			),
+		)
+	}
+
+	var missingRanges []MissInterval
+	currentTime := start
+
+	for _, data := range cachedSeriesDataList {
+		// Ignore cached data that ends before the start time
+		if data.End <= start {
+			continue
+		}
+		// Stop processing if we've reached the end time
+		if data.Start >= end {
+			break
+		}
+
+		// Add missing range if there's a gap
+		if currentTime < data.Start {
+			missingRanges = append(missingRanges, MissInterval{Start: currentTime, End: min(data.Start, end)})
+		}
+
+		// Update currentTime, but don't go past the end time
+		currentTime = max(currentTime, min(data.End, end))
+	}
+
+	// Add final missing range if necessary
+	if currentTime < end {
+		missingRanges = append(missingRanges, MissInterval{Start: currentTime, End: end})
+	}
+
+	return missingRanges
+}
+
+func (q *queryCache) getCachedSeriesData(cacheKey string) []*CachedSeriesData {
+	cachedData, _, _ := q.cache.Retrieve(cacheKey, true)
+	var cachedSeriesDataList []*CachedSeriesData
+	if err := json.Unmarshal(cachedData, &cachedSeriesDataList); err != nil {
+		return nil
+	}
+	return cachedSeriesDataList
+}
+
+func (q *queryCache) mergeSeries(cachedSeries, missedSeries []*v3.Series) []*v3.Series {
+	// Merge the missed series with the cached series by timestamp
+	mergedSeries := make([]*v3.Series, 0)
+	seriesesByLabels := make(map[uint64]*v3.Series)
+	for idx := range cachedSeries {
+		series := cachedSeries[idx]
+		seriesesByLabels[labels.FromMap(series.Labels).Hash()] = series
+	}
+
+	for idx := range missedSeries {
+		series := missedSeries[idx]
+		h := labels.FromMap(series.Labels).Hash()
+		if _, ok := seriesesByLabels[h]; !ok {
+			seriesesByLabels[h] = series
+			continue
+		}
+		seriesesByLabels[h].Points = append(seriesesByLabels[h].Points, series.Points...)
+	}
+	// Sort the points in each series by timestamp
+	for idx := range seriesesByLabels {
+		series := seriesesByLabels[idx]
+		series.SortPoints()
+		series.RemoveDuplicatePoints()
+		mergedSeries = append(mergedSeries, series)
+	}
+	return mergedSeries
+}
+
+func (q *queryCache) storeMergedData(cacheKey string, mergedData []CachedSeriesData) {
+	mergedDataJSON, err := json.Marshal(mergedData)
+	if err != nil {
+		zap.L().Error("error marshalling merged data", zap.Error(err))
+		return
+	}
+	err = q.cache.Store(cacheKey, mergedDataJSON, 0)
+	if err != nil {
+		zap.L().Error("error storing merged data", zap.Error(err))
+	}
+}
+
+func (q *queryCache) MergeWithCachedSeriesData(cacheKey string, newData []CachedSeriesData) []CachedSeriesData {
+
+	if q.cache == nil {
+		return newData
+	}
+
+	cachedData, _, _ := q.cache.Retrieve(cacheKey, true)
+	var existingData []CachedSeriesData
+	if err := json.Unmarshal(cachedData, &existingData); err != nil {
+		// If the cached payload cannot be unmarshalled, treat the new data as
+		// the full result and overwrite the cache entry with it
+		q.storeMergedData(cacheKey, newData)
+		return newData
+	}
+
+	allData := append(existingData, newData...)
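+
+	// What follows is a standard interval-merge sweep: sort all cached and
+	// new buckets by Start, walk them once, fold any bucket that overlaps
+	// the current one into it (merging its series per label hash via
+	// mergeSeries), and emit the current bucket whenever a gap appears.
+	// For example, buckets [1000,2000] and [1500,2500] collapse into a
+	// single [1000,2500] bucket, while [1000,2000] and [3000,4000] remain
+	// separate entries.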
+ + sort.Slice(allData, func(i, j int) bool { + return allData[i].Start < allData[j].Start + }) + + var mergedData []CachedSeriesData + var current *CachedSeriesData + + for _, data := range allData { + if current == nil { + current = &CachedSeriesData{ + Start: data.Start, + End: data.End, + Data: data.Data, + } + continue + } + if data.Start <= current.End { + // Overlapping intervals, merge them + current.End = max(current.End, data.End) + current.Start = min(current.Start, data.Start) + // Merge the Data fields + current.Data = q.mergeSeries(current.Data, data.Data) + } else { + // No overlap, add current to mergedData + mergedData = append(mergedData, *current) + // Start new current + current = &CachedSeriesData{ + Start: data.Start, + End: data.End, + Data: data.Data, + } + } + } + + // After the loop, add the last current + if current != nil { + mergedData = append(mergedData, *current) + } + + q.storeMergedData(cacheKey, mergedData) + + return mergedData +} diff --git a/pkg/query-service/querycache/query_range_cache_test.go b/pkg/query-service/querycache/query_range_cache_test.go new file mode 100644 index 0000000000..c71ba13f10 --- /dev/null +++ b/pkg/query-service/querycache/query_range_cache_test.go @@ -0,0 +1,336 @@ +package querycache_test + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.signoz.io/signoz/pkg/query-service/cache/inmemory" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/querycache" +) + +func TestFindMissingTimeRanges(t *testing.T) { + // Initialize the mock cache + mockCache := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute}) + + // Create a queryCache instance with the mock cache and a fluxInterval + q := querycache.NewQueryCache( + querycache.WithCache(mockCache), + querycache.WithFluxInterval(0), // Set to zero for testing purposes + ) + + // Define the test cases + testCases := []struct { + name string + requestedStart int64 // in milliseconds + requestedEnd int64 // in milliseconds + step int64 // in seconds + cacheKey string + cachedData []querycache.CachedSeriesData + expectedMiss []querycache.MissInterval + }{ + { + name: "Cached time range is a subset of the requested time range", + requestedStart: 1000, + requestedEnd: 5000, + step: 60, + cacheKey: "testKey1", + cachedData: []querycache.CachedSeriesData{ + { + Start: 2000, + End: 3000, + Data: []*v3.Series{}, // Data can be empty for this test + }, + }, + expectedMiss: []querycache.MissInterval{ + {Start: 1000, End: 2000}, + {Start: 3000, End: 5000}, + }, + }, + { + name: "Cached time range is a superset of the requested time range", + requestedStart: 2000, + requestedEnd: 3000, + step: 60, + cacheKey: "testKey2", + cachedData: []querycache.CachedSeriesData{ + { + Start: 1000, + End: 4000, + Data: []*v3.Series{}, + }, + }, + expectedMiss: nil, // No missing intervals + }, + { + name: "Cached time range is a left overlap of the requested time range", + requestedStart: 2000, + requestedEnd: 4000, + step: 60, + cacheKey: "testKey3", + cachedData: []querycache.CachedSeriesData{ + { + Start: 1000, + End: 2500, + Data: []*v3.Series{}, + }, + }, + expectedMiss: []querycache.MissInterval{ + {Start: 2500, End: 4000}, + }, + }, + { + name: "Cached time range is a right overlap of the requested time range", + requestedStart: 2000, + requestedEnd: 4000, + step: 60, + cacheKey: "testKey4", + cachedData: []querycache.CachedSeriesData{ + { + Start: 3500, + End: 5000, + Data: 
[]*v3.Series{}, + }, + }, + expectedMiss: []querycache.MissInterval{ + {Start: 2000, End: 3500}, + }, + }, + { + name: "Cached time range is disjoint from the requested time range", + requestedStart: 2000, + requestedEnd: 4000, + step: 60, + cacheKey: "testKey5", + cachedData: []querycache.CachedSeriesData{ + { + Start: 5000, + End: 6000, + Data: []*v3.Series{}, + }, + }, + expectedMiss: []querycache.MissInterval{ + {Start: 2000, End: 4000}, + }, + }, + // Additional test cases for non-overlapping cached data + { + name: "Multiple non-overlapping cached intervals within requested range", + requestedStart: 1000, + requestedEnd: 5000, + step: 60, + cacheKey: "testKey6", + cachedData: []querycache.CachedSeriesData{ + {Start: 1100, End: 1200, Data: []*v3.Series{}}, + {Start: 1300, End: 1400, Data: []*v3.Series{}}, + {Start: 1500, End: 1600, Data: []*v3.Series{}}, + }, + expectedMiss: []querycache.MissInterval{ + {Start: 1000, End: 1100}, + {Start: 1200, End: 1300}, + {Start: 1400, End: 1500}, + {Start: 1600, End: 5000}, + }, + }, + { + name: "Cached intervals covering some parts with gaps", + requestedStart: 1000, + requestedEnd: 2000, + step: 60, + cacheKey: "testKey7", + cachedData: []querycache.CachedSeriesData{ + {Start: 1000, End: 1100, Data: []*v3.Series{}}, + {Start: 1200, End: 1300, Data: []*v3.Series{}}, + {Start: 1400, End: 1500, Data: []*v3.Series{}}, + {Start: 1600, End: 1700, Data: []*v3.Series{}}, + }, + expectedMiss: []querycache.MissInterval{ + {Start: 1100, End: 1200}, + {Start: 1300, End: 1400}, + {Start: 1500, End: 1600}, + {Start: 1700, End: 2000}, + }, + }, + { + name: "Non-overlapping cached intervals outside requested range", + requestedStart: 2000, + requestedEnd: 3000, + step: 60, + cacheKey: "testKey8", + cachedData: []querycache.CachedSeriesData{ + {Start: 1000, End: 1500, Data: []*v3.Series{}}, + {Start: 3500, End: 4000, Data: []*v3.Series{}}, + }, + expectedMiss: []querycache.MissInterval{ + {Start: 2000, End: 3000}, + }, + }, + { + name: "No cached data at all", + requestedStart: 1000, + requestedEnd: 2000, + step: 60, + cacheKey: "testKey10", + cachedData: nil, + expectedMiss: []querycache.MissInterval{ + {Start: 1000, End: 2000}, + }, + }, + { + name: "Cached intervals with overlapping and non-overlapping mix", + requestedStart: 1000, + requestedEnd: 5000, + step: 60, + cacheKey: "testKey11", + cachedData: []querycache.CachedSeriesData{ + {Start: 1000, End: 2000, Data: []*v3.Series{}}, + {Start: 1500, End: 2500, Data: []*v3.Series{}}, // Overlaps with previous + {Start: 3000, End: 3500, Data: []*v3.Series{}}, + {Start: 4000, End: 4500, Data: []*v3.Series{}}, + }, + expectedMiss: []querycache.MissInterval{ + {Start: 2500, End: 3000}, + {Start: 3500, End: 4000}, + {Start: 4500, End: 5000}, + }, + }, + { + name: "Cached intervals covering the edges but missing middle", + requestedStart: 1000, + requestedEnd: 5000, + step: 60, + cacheKey: "testKey12", + cachedData: []querycache.CachedSeriesData{ + {Start: 1000, End: 1500, Data: []*v3.Series{}}, + {Start: 4500, End: 5000, Data: []*v3.Series{}}, + }, + expectedMiss: []querycache.MissInterval{ + {Start: 1500, End: 4500}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + // Store the cached data in the mock cache + if len(tc.cachedData) > 0 { + cachedDataJSON, err := json.Marshal(tc.cachedData) + assert.NoError(t, err) + err = mockCache.Store(tc.cacheKey, cachedDataJSON, 0) + assert.NoError(t, err) + } + + // Call FindMissingTimeRanges + missingRanges := 
q.FindMissingTimeRanges(tc.requestedStart, tc.requestedEnd, tc.step, tc.cacheKey) + + // Verify the missing ranges + assert.Equal(t, tc.expectedMiss, missingRanges) + }) + } +} + +func TestMergeWithCachedSeriesData(t *testing.T) { + // Initialize the mock cache + mockCache := inmemory.New(&inmemory.Options{TTL: 5 * time.Minute, CleanupInterval: 10 * time.Minute}) + + // Create a queryCache instance with the mock cache and a fluxInterval + q := querycache.NewQueryCache( + querycache.WithCache(mockCache), + querycache.WithFluxInterval(0), // Set to zero for testing purposes + ) + + // Define test data + cacheKey := "mergeTestKey" + + // Existing cached data + existingData := []querycache.CachedSeriesData{ + { + Start: 1000, + End: 2000, + Data: []*v3.Series{ + { + Labels: map[string]string{"metric": "cpu", "instance": "localhost"}, + Points: []v3.Point{ + {Timestamp: 1500, Value: 0.5}, + }, + }, + }, + }, + } + + // New data to merge + newData := []querycache.CachedSeriesData{ + { + Start: 1500, + End: 2500, + Data: []*v3.Series{ + { + Labels: map[string]string{"metric": "cpu", "instance": "localhost"}, + Points: []v3.Point{ + {Timestamp: 1750, Value: 0.6}, + }, + }, + { + Labels: map[string]string{"metric": "memory", "instance": "localhost"}, + Points: []v3.Point{ + {Timestamp: 1800, Value: 0.7}, + }, + }, + }, + }, + } + + // Expected merged data + expectedMergedData := []querycache.CachedSeriesData{ + { + Start: 1000, + End: 2500, + Data: []*v3.Series{ + { + Labels: map[string]string{"metric": "cpu", "instance": "localhost"}, + Points: []v3.Point{ + {Timestamp: 1500, Value: 0.5}, + {Timestamp: 1750, Value: 0.6}, + }, + }, + { + Labels: map[string]string{"metric": "memory", "instance": "localhost"}, + Points: []v3.Point{ + {Timestamp: 1800, Value: 0.7}, + }, + }, + }, + }, + } + + // Store existing data in cache + cachedDataJSON, err := json.Marshal(existingData) + assert.NoError(t, err) + err = mockCache.Store(cacheKey, cachedDataJSON, 0) + assert.NoError(t, err) + + // Call MergeWithCachedSeriesData + mergedData := q.MergeWithCachedSeriesData(cacheKey, newData) + + // Verify the merged data + assert.Equal(t, len(expectedMergedData), len(mergedData)) + for i, expected := range expectedMergedData { + actual := mergedData[i] + assert.Equal(t, expected.Start, actual.Start) + assert.Equal(t, expected.End, actual.End) + assert.Equal(t, len(expected.Data), len(actual.Data)) + for j, expectedSeries := range expected.Data { + actualSeries := actual.Data[j] + assert.Equal(t, expectedSeries.Labels, actualSeries.Labels) + assert.Equal(t, len(expectedSeries.Points), len(actualSeries.Points)) + for k, expectedPoint := range expectedSeries.Points { + actualPoint := actualSeries.Points[k] + assert.Equal(t, expectedPoint.Timestamp, actualPoint.Timestamp) + assert.Equal(t, expectedPoint.Value, actualPoint.Value) + } + } + } +} From f3c01a51556bf9d9e3b5583e0135d10ec7782cac Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Fri, 20 Sep 2024 23:37:15 +0530 Subject: [PATCH 56/79] fix: export as csv for logs and traces table panel type (#6047) * fix: export as csv for logs and traces panel type * chore: remove console logs --- frontend/src/container/GridTableComponent/index.tsx | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/frontend/src/container/GridTableComponent/index.tsx b/frontend/src/container/GridTableComponent/index.tsx index 676a745b65..fbd3892c48 100644 --- a/frontend/src/container/GridTableComponent/index.tsx +++ b/frontend/src/container/GridTableComponent/index.tsx 
@@ -4,7 +4,7 @@ import { getYAxisFormattedValue } from 'components/Graph/yAxisConfig'; import { Events } from 'constants/events'; import { QueryTable } from 'container/QueryTable'; import { RowData } from 'lib/query/createTableColumnsFromQuery'; -import { cloneDeep, get, isEmpty, set } from 'lodash-es'; +import { cloneDeep, get, isEmpty } from 'lodash-es'; import { memo, ReactNode, useCallback, useEffect, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { eventEmitter } from 'utils/getEventEmitter'; @@ -38,15 +38,13 @@ function GridTableComponent({ const createDataInCorrectFormat = useCallback( (dataSource: RowData[]): RowData[] => dataSource.map((d) => { - const finalObject = {}; + const finalObject: Record = {}; // we use the order of the columns here to have similar download as the user view + // the [] access for the object is used because the titles can contain dot(.) as well columns.forEach((k) => { - set( - finalObject, - get(k, 'title', '') as string, - get(d, get(k, 'dataIndex', ''), 'n/a'), - ); + finalObject[`${get(k, 'title', '')}`] = + d[`${get(k, 'dataIndex', '')}`] || 'n/a'; }); return finalObject as RowData; }), @@ -86,6 +84,7 @@ function GridTableComponent({ applyColumnUnits, originalDataSource, ]); + useEffect(() => { if (tableProcessedDataRef) { // eslint-disable-next-line no-param-reassign From c5b5bfe5406d2dc3c59f50977b76fa4d53d7bc23 Mon Sep 17 00:00:00 2001 From: rahulkeswani101 Date: Fri, 20 Sep 2024 23:56:34 +0530 Subject: [PATCH 57/79] feat: added new tab for infra metrics in logs detailed page (#5771) * feat: added new tab for infra metrics in logs detailed page * feat: added yaxis unit for the charts * chore: cleanup query_range params * fix: clusterName, podName variables not working * feat: added skeleton for each charts in infra metrics tab * change card height to 300px * fix: updated the test cases * feat: added new sub-tabs node and pod for infra metrics tab * feat: added new components for node and pod metrics * feat: added card titles for host metrics and handled empty state * fix: updated the constant for host name * feat: added vertical dotted line to all panels and updated y axis units for all panels * feat: removed other panel types other than graph from host metrics query payload * fix: updated the query payload for node metrics * feat: moved the label of vertical dotted line to top * feat: added console statement to check query payload * fix: added pod name instead of node name in pod query payload * fix: added key as pod name instead of node name in file system usage * fix: updated query payload for file system usage in pod metrics and removed label from dotted line * fix: updated the y axis units for network io * fix: custom date time issue while plotting the graph * feat: compare end time and current time update the end time accordingly * feat: added the start and end time in query payloads * refactor: removed the comments and unused variables * chore: added a todo to make common component for sub-tabs * fix: addressed review comments --------- Co-authored-by: Ankit Nayan --- .../tests/DraggableTableRow.test.tsx | 14 + .../src/components/LogDetail/constants.ts | 8 + frontend/src/components/LogDetail/index.tsx | 24 +- .../InfraMetrics/InfraMetrics.styles.scss | 34 + .../InfraMetrics/InfraMetrics.tsx | 94 + .../InfraMetrics/NodeMetrics.tsx | 140 + .../InfraMetrics/PodMetrics.tsx | 121 + .../LogDetailedView/InfraMetrics/constants.ts | 3033 +++++++++++++++++ .../tests/LogsExplorerViews.test.tsx | 14 + 
.../tests/ChangeHistory.test.tsx | 14 + .../tests/AddNewPipeline.test.tsx | 14 + .../tests/AddNewProcessor.test.tsx | 14 + .../PipelinePage/tests/DeleteAction.test.tsx | 14 + .../PipelinePage/tests/DragAction.test.tsx | 14 + .../PipelinePage/tests/EditAction.test.tsx | 14 + .../tests/PipelineActions.test.tsx | 14 + .../tests/PipelineExpandView.test.tsx | 14 + .../tests/PipelineListsView.test.tsx | 14 + .../tests/PipelinePageLayout.test.tsx | 14 + .../PipelinePage/tests/TagInput.test.tsx | 14 + .../PipelinePage/tests/utils.test.ts | 14 + frontend/src/lib/dashboard/getQueryResults.ts | 6 +- .../lib/dashboard/prepareQueryRangePayload.ts | 6 +- .../src/lib/uPlotLib/getUplotChartOptions.ts | 25 + .../__tests__/LogsExplorer.test.tsx | 15 + 25 files changed, 3697 insertions(+), 5 deletions(-) create mode 100644 frontend/src/container/LogDetailedView/InfraMetrics/InfraMetrics.styles.scss create mode 100644 frontend/src/container/LogDetailedView/InfraMetrics/InfraMetrics.tsx create mode 100644 frontend/src/container/LogDetailedView/InfraMetrics/NodeMetrics.tsx create mode 100644 frontend/src/container/LogDetailedView/InfraMetrics/PodMetrics.tsx create mode 100644 frontend/src/container/LogDetailedView/InfraMetrics/constants.ts diff --git a/frontend/src/components/DraggableTableRow/tests/DraggableTableRow.test.tsx b/frontend/src/components/DraggableTableRow/tests/DraggableTableRow.test.tsx index f938a19203..67bbeb56f2 100644 --- a/frontend/src/components/DraggableTableRow/tests/DraggableTableRow.test.tsx +++ b/frontend/src/components/DraggableTableRow/tests/DraggableTableRow.test.tsx @@ -12,6 +12,20 @@ beforeAll(() => { matchMedia(); }); +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + jest.mock('react-dnd', () => ({ useDrop: jest.fn().mockImplementation(() => [jest.fn(), jest.fn(), jest.fn()]), useDrag: jest.fn().mockImplementation(() => [jest.fn(), jest.fn(), jest.fn()]), diff --git a/frontend/src/components/LogDetail/constants.ts b/frontend/src/components/LogDetail/constants.ts index 92199d4441..dea5121dd1 100644 --- a/frontend/src/components/LogDetail/constants.ts +++ b/frontend/src/components/LogDetail/constants.ts @@ -2,6 +2,14 @@ export const VIEW_TYPES = { OVERVIEW: 'OVERVIEW', JSON: 'JSON', CONTEXT: 'CONTEXT', + INFRAMETRICS: 'INFRAMETRICS', } as const; export type VIEWS = typeof VIEW_TYPES[keyof typeof VIEW_TYPES]; + +export const RESOURCE_KEYS = { + CLUSTER_NAME: 'k8s.cluster.name', + POD_NAME: 'k8s.pod.name', + NODE_NAME: 'k8s.node.name', + HOST_NAME: 'host.name', +} as const; diff --git a/frontend/src/components/LogDetail/index.tsx b/frontend/src/components/LogDetail/index.tsx index b138718ed9..4748312ceb 100644 --- a/frontend/src/components/LogDetail/index.tsx +++ b/frontend/src/components/LogDetail/index.tsx @@ -9,6 +9,7 @@ import cx from 'classnames'; import { LogType } from 'components/Logs/LogStateIndicator/LogStateIndicator'; import { LOCALSTORAGE } from 'constants/localStorage'; import ContextView from 'container/LogDetailedView/ContextView/ContextView'; +import InfraMetrics from 'container/LogDetailedView/InfraMetrics/InfraMetrics'; import JSONView from 'container/LogDetailedView/JsonView'; import Overview from 'container/LogDetailedView/Overview'; import { @@ -22,6 +23,7 @@ import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder'; import { useIsDarkMode } from 'hooks/useDarkMode'; import { useNotifications } from 
'hooks/useNotifications'; import { + BarChart2, Braces, Copy, Filter, @@ -36,7 +38,7 @@ import { Query, TagFilter } from 'types/api/queryBuilder/queryBuilderData'; import { DataSource, StringOperators } from 'types/common/queryBuilder'; import { FORBID_DOM_PURIFY_TAGS } from 'utils/app'; -import { VIEW_TYPES, VIEWS } from './constants'; +import { RESOURCE_KEYS, VIEW_TYPES, VIEWS } from './constants'; import { LogDetailProps } from './LogDetail.interfaces'; import QueryBuilderSearchWrapper from './QueryBuilderSearchWrapper'; @@ -192,6 +194,17 @@ function LogDetail({ Context
+ +
+ + Metrics +
+
{selectedView === VIEW_TYPES.JSON && ( @@ -246,6 +259,15 @@ function LogDetail({ isEdit={isEdit} /> )} + {selectedView === VIEW_TYPES.INFRAMETRICS && ( + + )} ); } diff --git a/frontend/src/container/LogDetailedView/InfraMetrics/InfraMetrics.styles.scss b/frontend/src/container/LogDetailedView/InfraMetrics/InfraMetrics.styles.scss new file mode 100644 index 0000000000..9e49bcba94 --- /dev/null +++ b/frontend/src/container/LogDetailedView/InfraMetrics/InfraMetrics.styles.scss @@ -0,0 +1,34 @@ +.empty-container { + display: flex; + justify-content: center; + align-items: center; + height: 100%; +} + +.infra-metrics-container { + .views-tabs { + margin-bottom: 1rem; + } +} + +.infra-metrics-card { + margin: 1rem 0; + height: 300px; + padding: 10px; + + .ant-card-body { + padding: 0; + } + + .chart-container { + width: 100%; + height: 100%; + } + + .no-data-container { + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + } +} diff --git a/frontend/src/container/LogDetailedView/InfraMetrics/InfraMetrics.tsx b/frontend/src/container/LogDetailedView/InfraMetrics/InfraMetrics.tsx new file mode 100644 index 0000000000..78a1d21b16 --- /dev/null +++ b/frontend/src/container/LogDetailedView/InfraMetrics/InfraMetrics.tsx @@ -0,0 +1,94 @@ +import './InfraMetrics.styles.scss'; + +import { Empty, Radio } from 'antd'; +import { RadioChangeEvent } from 'antd/lib'; +import { History, Table } from 'lucide-react'; +import { useState } from 'react'; + +import { VIEW_TYPES } from './constants'; +import NodeMetrics from './NodeMetrics'; +import PodMetrics from './PodMetrics'; + +interface MetricsDataProps { + podName: string; + nodeName: string; + hostName: string; + clusterName: string; + logLineTimestamp: string; +} + +function InfraMetrics({ + podName, + nodeName, + hostName, + clusterName, + logLineTimestamp, +}: MetricsDataProps): JSX.Element { + const [selectedView, setSelectedView] = useState(() => + podName ? VIEW_TYPES.POD : VIEW_TYPES.NODE, + ); + + const handleModeChange = (e: RadioChangeEvent): void => { + setSelectedView(e.target.value); + }; + + if (!podName && !nodeName && !hostName) { + return ( +
+ +
+ ); + } + + return ( +
+ + +
+ + Node + + + {podName && ( + +
+ + Pod +
+
+ )} + + {/* TODO(Rahul): Make a common config driven component for this and other infra metrics components */} + {selectedView === VIEW_TYPES.NODE && ( + + )} + {selectedView === VIEW_TYPES.POD && podName && ( + + )} + + ); +} + +export default InfraMetrics; diff --git a/frontend/src/container/LogDetailedView/InfraMetrics/NodeMetrics.tsx b/frontend/src/container/LogDetailedView/InfraMetrics/NodeMetrics.tsx new file mode 100644 index 0000000000..3c935c8b89 --- /dev/null +++ b/frontend/src/container/LogDetailedView/InfraMetrics/NodeMetrics.tsx @@ -0,0 +1,140 @@ +import { Card, Col, Row, Skeleton, Typography } from 'antd'; +import cx from 'classnames'; +import Uplot from 'components/Uplot'; +import { ENTITY_VERSION_V4 } from 'constants/app'; +import dayjs from 'dayjs'; +import { useIsDarkMode } from 'hooks/useDarkMode'; +import { useResizeObserver } from 'hooks/useDimensions'; +import { GetMetricQueryRange } from 'lib/dashboard/getQueryResults'; +import { getUPlotChartOptions } from 'lib/uPlotLib/getUplotChartOptions'; +import { getUPlotChartData } from 'lib/uPlotLib/utils/getUplotChartData'; +import { useMemo, useRef } from 'react'; +import { useQueries, UseQueryResult } from 'react-query'; +import { SuccessResponse } from 'types/api'; +import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange'; + +import { + getHostQueryPayload, + getNodeQueryPayload, + hostWidgetInfo, + nodeWidgetInfo, +} from './constants'; + +function NodeMetrics({ + nodeName, + clusterName, + hostName, + logLineTimestamp, +}: { + nodeName: string; + clusterName: string; + hostName: string; + logLineTimestamp: string; +}): JSX.Element { + const { start, end, verticalLineTimestamp } = useMemo(() => { + const logTimestamp = dayjs(logLineTimestamp); + const now = dayjs(); + const startTime = logTimestamp.subtract(3, 'hour'); + + const endTime = logTimestamp.add(3, 'hour').isBefore(now) + ? logTimestamp.add(3, 'hour') + : now; + + return { + start: startTime.unix(), + end: endTime.unix(), + verticalLineTimestamp: logTimestamp.unix(), + }; + }, [logLineTimestamp]); + + const queryPayloads = useMemo(() => { + if (nodeName) { + return getNodeQueryPayload(clusterName, nodeName, start, end); + } + return getHostQueryPayload(hostName, start, end); + }, [nodeName, hostName, clusterName, start, end]); + + const widgetInfo = nodeName ? nodeWidgetInfo : hostWidgetInfo; + const queries = useQueries( + queryPayloads.map((payload) => ({ + queryKey: ['metrics', payload, ENTITY_VERSION_V4, 'NODE'], + queryFn: (): Promise> => + GetMetricQueryRange(payload, ENTITY_VERSION_V4), + enabled: !!payload, + })), + ); + + const isDarkMode = useIsDarkMode(); + const graphRef = useRef(null); + const dimensions = useResizeObserver(graphRef); + + const chartData = useMemo( + () => queries.map(({ data }) => getUPlotChartData(data?.payload)), + [queries], + ); + + const options = useMemo( + () => + queries.map(({ data }, idx) => + getUPlotChartOptions({ + apiResponse: data?.payload, + isDarkMode, + dimensions, + yAxisUnit: widgetInfo[idx].yAxisUnit, + softMax: null, + softMin: null, + minTimeScale: start, + maxTimeScale: end, + verticalLineTimestamp, + }), + ), + [ + queries, + isDarkMode, + dimensions, + widgetInfo, + start, + verticalLineTimestamp, + end, + ], + ); + + const renderCardContent = ( + query: UseQueryResult, unknown>, + idx: number, + ): JSX.Element => { + if (query.isLoading) { + return ; + } + + if (query.error) { + const errorMessage = + (query.error as Error)?.message || 'Something went wrong'; + return
{errorMessage}
; + } + return ( +
+ +
+ ); + }; + return ( + + {queries.map((query, idx) => ( +
+ {widgetInfo[idx].title} + + {renderCardContent(query, idx)} + + + ))} + + ); +} + +export default NodeMetrics; diff --git a/frontend/src/container/LogDetailedView/InfraMetrics/PodMetrics.tsx b/frontend/src/container/LogDetailedView/InfraMetrics/PodMetrics.tsx new file mode 100644 index 0000000000..99391d65e0 --- /dev/null +++ b/frontend/src/container/LogDetailedView/InfraMetrics/PodMetrics.tsx @@ -0,0 +1,121 @@ +import { Card, Col, Row, Skeleton, Typography } from 'antd'; +import cx from 'classnames'; +import Uplot from 'components/Uplot'; +import { ENTITY_VERSION_V4 } from 'constants/app'; +import dayjs from 'dayjs'; +import { useIsDarkMode } from 'hooks/useDarkMode'; +import { useResizeObserver } from 'hooks/useDimensions'; +import { GetMetricQueryRange } from 'lib/dashboard/getQueryResults'; +import { getUPlotChartOptions } from 'lib/uPlotLib/getUplotChartOptions'; +import { getUPlotChartData } from 'lib/uPlotLib/utils/getUplotChartData'; +import { useMemo, useRef } from 'react'; +import { useQueries, UseQueryResult } from 'react-query'; +import { SuccessResponse } from 'types/api'; +import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange'; + +import { getPodQueryPayload, podWidgetInfo } from './constants'; + +function PodMetrics({ + podName, + clusterName, + logLineTimestamp, +}: { + podName: string; + clusterName: string; + logLineTimestamp: string; +}): JSX.Element { + const { start, end, verticalLineTimestamp } = useMemo(() => { + const logTimestamp = dayjs(logLineTimestamp); + const now = dayjs(); + const startTime = logTimestamp.subtract(3, 'hour'); + + const endTime = logTimestamp.add(3, 'hour').isBefore(now) + ? logTimestamp.add(3, 'hour') + : now; + + return { + start: startTime.unix(), + end: endTime.unix(), + verticalLineTimestamp: logTimestamp.unix(), + }; + }, [logLineTimestamp]); + const queryPayloads = useMemo( + () => getPodQueryPayload(clusterName, podName, start, end), + [clusterName, end, podName, start], + ); + const queries = useQueries( + queryPayloads.map((payload) => ({ + queryKey: ['metrics', payload, ENTITY_VERSION_V4, 'POD'], + queryFn: (): Promise> => + GetMetricQueryRange(payload, ENTITY_VERSION_V4), + enabled: !!payload, + })), + ); + + const isDarkMode = useIsDarkMode(); + const graphRef = useRef(null); + const dimensions = useResizeObserver(graphRef); + + const chartData = useMemo( + () => queries.map(({ data }) => getUPlotChartData(data?.payload)), + [queries], + ); + + const options = useMemo( + () => + queries.map(({ data }, idx) => + getUPlotChartOptions({ + apiResponse: data?.payload, + isDarkMode, + dimensions, + yAxisUnit: podWidgetInfo[idx].yAxisUnit, + softMax: null, + softMin: null, + minTimeScale: start, + maxTimeScale: end, + verticalLineTimestamp, + }), + ), + [queries, isDarkMode, dimensions, start, verticalLineTimestamp, end], + ); + + const renderCardContent = ( + query: UseQueryResult, unknown>, + idx: number, + ): JSX.Element => { + if (query.isLoading) { + return ; + } + + if (query.error) { + const errorMessage = + (query.error as Error)?.message || 'Something went wrong'; + return
<Typography>{errorMessage}</Typography>
; + } + return ( +
+ +
+ ); + }; + + return ( + + {queries.map((query, idx) => ( +
+ {podWidgetInfo[idx].title} + + {renderCardContent(query, idx)} + + + ))} + + ); +} + +export default PodMetrics; diff --git a/frontend/src/container/LogDetailedView/InfraMetrics/constants.ts b/frontend/src/container/LogDetailedView/InfraMetrics/constants.ts new file mode 100644 index 0000000000..39130e7f56 --- /dev/null +++ b/frontend/src/container/LogDetailedView/InfraMetrics/constants.ts @@ -0,0 +1,3033 @@ +/* eslint-disable sonarjs/no-duplicate-string */ +import { PANEL_TYPES } from 'constants/queryBuilder'; +import { GetQueryResultsProps } from 'lib/dashboard/getQueryResults'; +import { DataTypes } from 'types/api/queryBuilder/queryAutocompleteResponse'; +import { EQueryType } from 'types/common/dashboard'; +import { DataSource } from 'types/common/queryBuilder'; + +export const getPodQueryPayload = ( + clusterName: string, + podName: string, + start: number, + end: number, +): GetQueryResultsProps[] => [ + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'container_cpu_utilization--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'container_cpu_utilization', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: '6e050953', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: '60fe5e62', + key: { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + op: '=', + value: podName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + ], + having: [], + legend: '{{k8s_pod_name}}', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '9b92756a-b445-45f8-90f4-d26f3ef28f8f', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'container_memory_usage--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'container_memory_usage', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: 'a4250695', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: '3b2bc32b', + key: { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + op: '=', + value: podName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 
'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + ], + having: [], + legend: '{{k8s_pod_name}}', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: 'a22c1e03-4876-4b3e-9a96-a3c3a28f9c0f', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'container_cpu_utilization--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'container_cpu_utilization', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'A', + filters: { + items: [ + { + id: '8426b52f', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: '2f67240c', + key: { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + key: 'k8s_pod_name', + type: 'tag', + }, + op: '=', + value: podName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'k8s_container_cpu_request--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'k8s_container_cpu_request', + type: 'Gauge', + }, + aggregateOperator: 'latest', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'B', + filters: { + items: [ + { + id: '8c4667e1', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: 'b16e7306', + key: { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + op: 'in', + value: podName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'B', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'latest', + }, + ], + queryFormulas: [ + { + disabled: false, + expression: 'A*100/B', + legend: '{{k8s_pod_name}}', + queryName: 'F1', + }, + ], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '7bb3a6f5-d1c6-4f2e-9cc9-7dcc46db398f', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + 
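+ // Same shape as the request-based panel above, but the divisor query (B) is
+ // k8s_container_cpu_limit, so the A*100/B formula charts CPU usage as a % of the limit.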
selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'container_cpu_utilization--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'container_cpu_utilization', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'A', + filters: { + items: [ + { + id: '0a862947', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: 'cd13fbf0', + key: { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + key: 'k8s_pod_name', + type: 'tag', + }, + op: '=', + value: podName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + ], + having: [], + legend: 'usage - {{k8s_pod_name}}', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'k8s_container_cpu_limit--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'k8s_container_cpu_limit', + type: 'Gauge', + }, + aggregateOperator: 'latest', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'B', + filters: { + items: [ + { + id: 'bfb8acf7', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: 'e09ba819', + key: { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + op: 'in', + value: podName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + ], + having: [], + legend: 'limit - {{k8s_pod_name}}', + limit: null, + orderBy: [], + queryName: 'B', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'latest', + }, + ], + queryFormulas: [ + { + disabled: false, + expression: 'A*100/B', + legend: '{{k8s_pod_name}}', + queryName: 'F1', + }, + ], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '6d5ccd81-0ea1-4fb9-a66b-7f0fe2f15165', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'container_memory_usage--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'container_memory_usage', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'A', + filters: { + items: [ + { + id: 'ea3df3e7', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: 
clusterName, + }, + { + id: '39b21fe0', + key: { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + key: 'k8s_pod_name', + type: 'tag', + }, + op: 'in', + value: podName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'k8s_container_memory_request--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'k8s_container_memory_request', + type: 'Gauge', + }, + aggregateOperator: 'latest', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'B', + filters: { + items: [ + { + id: '7401a4b9', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: '7cdad1cb', + key: { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + op: '=', + value: podName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'B', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'latest', + }, + ], + queryFormulas: [ + { + disabled: false, + expression: 'A*100/B', + legend: '{{k8s_pod_name}}', + queryName: 'F1', + }, + ], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '4d03a0ff-4fa5-4b19-b397-97f80ba9e0ac', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'container_memory_usage--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'container_memory_usage', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'A', + filters: { + items: [ + { + id: 'f2a3175c', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: 'fc17ff21', + key: { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + key: 'k8s_pod_name', + type: 'tag', + }, + op: '=', + value: podName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 
'k8s_container_memory_limit--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'k8s_container_memory_limit', + type: 'Gauge', + }, + aggregateOperator: 'latest', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'B', + filters: { + items: [ + { + id: '175e96b7', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: '1d9fbe48', + key: { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + op: 'in', + value: podName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'B', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'latest', + }, + ], + queryFormulas: [ + { + disabled: false, + expression: 'A*100/B', + legend: '{{k8s_pod_name}}', + queryName: 'F1', + }, + ], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: 'ad491f19-0f83-4dd4-bb8f-bec295c18d1b', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'k8s_pod_filesystem_available--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'k8s_pod_filesystem_available', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'A', + filters: { + items: [ + { + id: '877385bf', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: '877385cd', + key: { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + key: 'k8s_pod_name', + type: 'tag', + }, + op: '=', + value: podName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'k8s_pod_filesystem_capacity--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'k8s_pod_filesystem_capacity', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'B', + filters: { + items: [ + { + id: '877385bf', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: '877385cd', + key: { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + key: 'k8s_pod_name', + type: 'tag', + }, + op: '=', + value: 
podName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_pod_name', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'B', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + queryFormulas: [ + { + disabled: false, + expression: '(B-A)/B', + legend: '{{k8s_pod_name}}', + queryName: 'F1', + }, + ], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '16908d4e-1565-4847-8d87-01ebb8fc494a', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + fillGaps: false, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'k8s_pod_network_io--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'k8s_pod_network_io', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: '877385bf', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: '9613b4da', + key: { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + key: 'k8s_pod_name', + type: 'tag', + }, + op: '=', + value: podName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_pod_name--string--tag--false', + isColumn: false, + key: 'k8s_pod_name', + type: 'tag', + }, + ], + having: [], + legend: '{{k8s_pod_name}}', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '4b255d6d-4cde-474d-8866-f4418583c18b', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, +]; + +export const getNodeQueryPayload = ( + clusterName: string, + nodeName: string, + start: number, + end: number, +): GetQueryResultsProps[] => [ + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'k8s_node_cpu_time--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'k8s_node_cpu_time', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'A', + filters: { + items: [ + { + id: '91223422', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: '91223422', + key: { + dataType: DataTypes.String, + id: 'k8s_node_name--string--tag--false', + isColumn: false, + key: 'k8s_node_name', + type: 'tag', + }, + op: 'in', + value: nodeName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 
'k8s_node_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_node_name', + type: 'tag', + }, + ], + having: [], + legend: '{{k8s_node_name}}', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'k8s_node_allocatable_cpu--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'k8s_node_allocatable_cpu', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'B', + filters: { + items: [ + { + id: '9700f1d4', + key: { + dataType: DataTypes.String, + id: 'k8s_node_name--string--tag--false', + isColumn: false, + key: 'k8s_node_name', + type: 'tag', + }, + op: 'in', + value: nodeName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_node_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'k8s_node_name', + type: 'tag', + }, + ], + having: [], + legend: '{{k8s_node_name}}', + limit: null, + orderBy: [], + queryName: 'B', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + queryFormulas: [ + { + disabled: false, + expression: 'A/B', + legend: '{{k8s_node_name}}', + queryName: 'F1', + }, + ], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '259295b5-774d-4b2e-8a4f-e5dd63e6c38d', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + fillGaps: false, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'k8s_node_memory_working_set--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'k8s_node_memory_working_set', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'A', + filters: { + items: [ + { + id: 'a9f58cf3', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: '8430c9a0', + key: { + dataType: DataTypes.String, + id: 'k8s_node_name--string--tag--false', + isColumn: false, + key: 'k8s_node_name', + type: 'tag', + }, + op: 'in', + value: nodeName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_node_name--string--tag--false', + isColumn: false, + key: 'k8s_node_name', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'k8s_node_allocatable_memory--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'k8s_node_allocatable_memory', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'B', + filters: { + items: [ + { + id: 'cb274856', + key: { + dataType: DataTypes.String, + id: 'k8s_node_name--string--tag--false', + isColumn: false, + key: 'k8s_node_name', + type: 'tag', + }, + op: 'in', + value: nodeName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: 
[ + { + dataType: DataTypes.String, + id: 'k8s_node_name--string--tag--false', + isColumn: false, + key: 'k8s_node_name', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'B', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + queryFormulas: [ + { + disabled: false, + expression: 'A/B', + legend: '{{k8s_node_name}}', + queryName: 'F1', + }, + ], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '486af4da-2a1a-4b8f-992c-eba098d3a6f9', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + fillGaps: false, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'k8s_node_network_io--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'k8s_node_network_io', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: '91223422', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: '66308505', + key: { + dataType: DataTypes.String, + id: 'k8s_node_name--string--tag--false', + isColumn: false, + key: 'k8s_node_name', + type: 'tag', + }, + op: 'in', + value: nodeName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'interface--string--tag--false', + isColumn: false, + key: 'interface', + type: 'tag', + }, + { + dataType: DataTypes.String, + id: 'direction--string--tag--false', + isColumn: false, + key: 'direction', + type: 'tag', + }, + { + dataType: DataTypes.String, + id: 'k8s_node_name--string--tag--false', + isColumn: false, + key: 'k8s_node_name', + type: 'tag', + }, + ], + having: [], + legend: '{{k8s_node_name}}-{{interface}}-{{direction}}', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: 'b56143c0-7d2f-4425-97c5-65ad6fc87366', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'k8s_node_filesystem_available--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'k8s_node_filesystem_available', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'A', + filters: { + items: [ + { + id: '91223422', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: 'a5dffef6', + key: { + dataType: DataTypes.String, + id: 'k8s_node_name--string--tag--false', + isColumn: false, + key: 'k8s_node_name', + type: 'tag', + }, + op: 'in', + value: nodeName, + }, + 
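+ // As with the panels above, filtering on k8s_cluster_name and k8s_node_name
+ // keeps this panel scoped to the node the log line came from.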
], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_node_name--string--tag--false', + isColumn: false, + key: 'k8s_node_name', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'k8s_node_filesystem_capacity--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'k8s_node_filesystem_capacity', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'B', + filters: { + items: [ + { + id: '91223422', + key: { + dataType: DataTypes.String, + id: 'k8s_cluster_name--string--tag--false', + isColumn: false, + key: 'k8s_cluster_name', + type: 'tag', + }, + op: '=', + value: clusterName, + }, + { + id: 'c79d5a16', + key: { + dataType: DataTypes.String, + id: 'k8s_node_name--string--tag--false', + isColumn: false, + key: 'k8s_node_name', + type: 'tag', + }, + op: 'in', + value: nodeName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'k8s_node_name--string--tag--false', + isColumn: false, + key: 'k8s_node_name', + type: 'tag', + }, + ], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'B', + reduceTo: 'sum', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + queryFormulas: [ + { + disabled: false, + expression: '(B-A)/B', + legend: '{{k8s_node_name}}', + queryName: 'F1', + }, + ], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '57eeac15-615c-4a71-9c61-8e0c0c76b045', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, +]; + +export const getHostQueryPayload = ( + hostName: string, + start: number, + end: number, +): GetQueryResultsProps[] => [ + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_cpu_time--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'system_cpu_time', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'A', + filters: { + items: [ + { + id: 'ad316791', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'state--string--tag--false', + isColumn: false, + isJSON: false, + key: 'state', + type: 'tag', + }, + ], + having: [], + legend: '{{state}}', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_cpu_time--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'system_cpu_time', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: true, + expression: 'B', + filters: { + items: [ + { + id: '6baf116b', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 
'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: '{{state}}', + limit: null, + orderBy: [], + queryName: 'B', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + queryFormulas: [ + { + disabled: false, + expression: 'A/B', + legend: '{{state}}', + queryName: 'F1', + }, + ], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '315b15fa-ff0c-442f-89f8-2bf4fb1af2f2', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_memory_usage--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'system_memory_usage', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: '8026009e', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'state--string--tag--false', + isColumn: false, + isJSON: false, + key: 'state', + type: 'tag', + }, + ], + having: [], + legend: '{{state}}', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '40218bfb-a9b7-4974-aead-5bf666e139bf', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_cpu_load_average_1m--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'system_cpu_load_average_1m', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: '4167fbb1', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: '1m', + limit: 30, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_cpu_load_average_5m--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'system_cpu_load_average_5m', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'B', + filters: { + items: [ + { + id: '0c2cfeca', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'host_name', + type: 'tag', + }, + op: '=', + 
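+ // The load-average panel runs the 1m/5m/15m gauges as three parallel
+ // builder queries (A, B, C) on one chart, so no formula step is needed.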
value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: '5m', + limit: 30, + orderBy: [], + queryName: 'B', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_cpu_load_average_15m--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'system_cpu_load_average_15m', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'C', + filters: { + items: [ + { + id: '28693375', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: '15m', + limit: 30, + orderBy: [], + queryName: 'C', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '8e6485ea-7018-43b0-ab27-b210f77b59ad', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_network_io--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'system_network_io', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: '3a03bc80', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'direction--string--tag--false', + isColumn: false, + isJSON: false, + key: 'direction', + type: 'tag', + }, + { + dataType: DataTypes.String, + id: 'device--string--tag--false', + isColumn: false, + isJSON: false, + key: 'device', + type: 'tag', + }, + ], + having: [ + { + columnName: 'SUM(system_network_io)', + op: '>', + value: 0, + }, + ], + legend: '{{device}}::{{direction}}', + limit: 30, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '47173220-44df-4ef6-87f4-31e333c180c7', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_network_packets--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'system_network_packets', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: '3082ef53', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: 
false, + isJSON: false, + key: 'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'direction--string--tag--false', + isColumn: false, + isJSON: false, + key: 'direction', + type: 'tag', + }, + { + dataType: DataTypes.String, + id: 'device--string--tag--false', + isColumn: false, + isJSON: false, + key: 'device', + type: 'tag', + }, + ], + having: [], + legend: '{{device}}::{{direction}}', + limit: 30, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '62eedbc6-c8ad-4d13-80a8-129396e1d1dc', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_network_errors--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'system_network_errors', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: '8859bc50', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'direction--string--tag--false', + isColumn: false, + isJSON: false, + key: 'direction', + type: 'tag', + }, + { + dataType: DataTypes.String, + id: 'device--string--tag--false', + isColumn: false, + isJSON: false, + key: 'device', + type: 'tag', + }, + ], + having: [], + legend: '{{device}}::{{direction}}', + limit: 30, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '5ddb1b38-53bb-46f5-b4fe-fe832d6b9b24', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_network_dropped--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'system_network_dropped', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: '40fec2e3', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'direction--string--tag--false', + isColumn: false, + isJSON: false, + key: 'direction', + type: 'tag', + }, + { + dataType: DataTypes.String, + id: 'device--string--tag--false', + isColumn: false, + isJSON: false, + key: 'device', + type: 'tag', + }, + ], 
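+ // Each network panel groups by direction and device, so every series is one
+ // interface/direction pair (legend '{{device}}::{{direction}}').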
+ having: [], + legend: '{{device}}::{{direction}}', + limit: 30, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: 'a849bcce-7684-4852-9134-530b45419b8f', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_network_connections--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'system_network_connections', + type: 'Gauge', + }, + aggregateOperator: 'avg', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: '87f665b5', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'protocol--string--tag--false', + isColumn: false, + isJSON: false, + key: 'protocol', + type: 'tag', + }, + { + dataType: DataTypes.String, + id: 'state--string--tag--false', + isColumn: false, + isJSON: false, + key: 'state', + type: 'tag', + }, + ], + having: [], + legend: '{{protocol}}::{{state}}', + limit: 30, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'avg', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: 'ab685a3d-fa4c-4663-8d94-c452e59038f3', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_disk_io--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'system_disk_io', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: '6039199f', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [], + having: [], + legend: '', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '9bd40b51-0790-4cdd-9718-551b2ded5926', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 
'system_disk_operation_time--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'system_disk_operation_time', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: 'd21dc017', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'device--string--tag--false', + isColumn: false, + isJSON: false, + key: 'device', + type: 'tag', + }, + { + dataType: DataTypes.String, + id: 'direction--string--tag--false', + isColumn: false, + isJSON: false, + key: 'direction', + type: 'tag', + }, + ], + having: [ + { + columnName: 'SUM(system_disk_operation_time)', + op: '>', + value: 0, + }, + ], + legend: '{{device}}::{{direction}}', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '9c6d18ad-89ff-4e38-a15a-440e72ed6ca8', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_disk_pending_operations--float64--Gauge--true', + isColumn: true, + isJSON: false, + key: 'system_disk_pending_operations', + type: 'Gauge', + }, + aggregateOperator: 'max', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: 'a1023af9', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + key: 'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'device--string--tag--false', + isColumn: false, + isJSON: false, + key: 'device', + type: 'tag', + }, + ], + having: [ + { + columnName: 'SUM(system_disk_pending_operations)', + op: '>', + value: 0, + }, + ], + legend: '{{device}}', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'max', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: 'f4cfc2a5-78fc-42cc-8f4a-194c8c916132', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, + { + selectedTime: 'GLOBAL_TIME', + graphType: PANEL_TYPES.TIME_SERIES, + query: { + builder: { + queryData: [ + { + aggregateAttribute: { + dataType: DataTypes.Float64, + id: 'system_disk_operation_time--float64--Sum--true', + isColumn: true, + isJSON: false, + key: 'system_disk_operation_time', + type: 'Sum', + }, + aggregateOperator: 'rate', + dataSource: DataSource.METRICS, + disabled: false, + expression: 'A', + filters: { + items: [ + { + id: 'd21dc017', + key: { + dataType: DataTypes.String, + id: 'host_name--string--tag--false', + isColumn: false, + isJSON: false, + 
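+ // Note: this last payload repeats the system_disk_operation_time query from
+ // above (same query id) to back the separate 'Disk operations time' panel.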
key: 'host_name', + type: 'tag', + }, + op: '=', + value: hostName, + }, + ], + op: 'AND', + }, + functions: [], + groupBy: [ + { + dataType: DataTypes.String, + id: 'device--string--tag--false', + isColumn: false, + isJSON: false, + key: 'device', + type: 'tag', + }, + { + dataType: DataTypes.String, + id: 'direction--string--tag--false', + isColumn: false, + isJSON: false, + key: 'direction', + type: 'tag', + }, + ], + having: [ + { + columnName: 'SUM(system_disk_operation_time)', + op: '>', + value: 0, + }, + ], + legend: '{{device}}::{{direction}}', + limit: null, + orderBy: [], + queryName: 'A', + reduceTo: 'avg', + spaceAggregation: 'sum', + stepInterval: 60, + timeAggregation: 'rate', + }, + ], + queryFormulas: [], + }, + clickhouse_sql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + id: '9c6d18ad-89ff-4e38-a15a-440e72ed6ca8', + promql: [ + { + disabled: false, + legend: '', + name: 'A', + query: '', + }, + ], + queryType: EQueryType.QUERY_BUILDER, + }, + variables: {}, + formatForWeb: false, + start, + end, + }, +]; + +export const podWidgetInfo = [ + { + title: 'CPU usage', + yAxisUnit: '', + }, + { + title: 'Memory Usage', + yAxisUnit: 'bytes', + }, + { + title: 'Pod CPU usage [% of Request]', + yAxisUnit: 'percent', + }, + { + title: 'Pod CPU usage [% of Limit]', + yAxisUnit: 'percent', + }, + { + title: 'Pod memory usage [% of Request]', + yAxisUnit: 'percent', + }, + { + title: 'Pod memory usage [% of Limit]', + yAxisUnit: 'percent', + }, + { + title: 'Pod filesystem usage [%]', + yAxisUnit: 'percentunit', + }, + { + title: 'Pod network IO', + yAxisUnit: 'binBps', + }, +]; + +export const VIEW_TYPES = { + NODE: 'node', + POD: 'pod', +}; + +export const nodeWidgetInfo = [ + { + title: 'Node CPU usage', + yAxisUnit: 'percentunit', + }, + { + title: 'Node memory usage (WSS)', + yAxisUnit: 'percentunit', + }, + { + title: 'Node network IO', + yAxisUnit: 'binBps', + }, + { + title: 'Node filesystem usage', + yAxisUnit: 'percentunit', + }, +]; + +export const hostWidgetInfo = [ + { title: 'CPU Usage', yAxisUnit: 'percentunit' }, + { title: 'Memory Usage', yAxisUnit: 'bytes' }, + { title: 'System Load Average', yAxisUnit: '' }, + { title: 'Network usage (bytes)', yAxisUnit: 'bytes' }, + { title: 'Network usage (packet/s)', yAxisUnit: 'pps' }, + { title: 'Network errors', yAxisUnit: 'short' }, + { title: 'Network drops', yAxisUnit: 'short' }, + { title: 'Network connections', yAxisUnit: 'short' }, + { title: 'System disk io (bytes transferred)', yAxisUnit: 'bytes' }, + { title: 'System disk operations/s', yAxisUnit: 'short' }, + { title: 'Queue size', yAxisUnit: 'short' }, + { title: 'Disk operations time', yAxisUnit: 's' }, +]; diff --git a/frontend/src/container/LogsExplorerViews/tests/LogsExplorerViews.test.tsx b/frontend/src/container/LogsExplorerViews/tests/LogsExplorerViews.test.tsx index 8262d6f9bc..6271ff793e 100644 --- a/frontend/src/container/LogsExplorerViews/tests/LogsExplorerViews.test.tsx +++ b/frontend/src/container/LogsExplorerViews/tests/LogsExplorerViews.test.tsx @@ -28,6 +28,20 @@ const lodsQueryServerRequest = (): void => ), ); +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + // mocking the graph components in this test as this should be handled separately jest.mock( 'container/TimeSeriesView/TimeSeriesView', diff --git 
a/frontend/src/container/PipelinePage/Layouts/ChangeHistory/tests/ChangeHistory.test.tsx b/frontend/src/container/PipelinePage/Layouts/ChangeHistory/tests/ChangeHistory.test.tsx index 194acbea0a..88fdb5d594 100644 --- a/frontend/src/container/PipelinePage/Layouts/ChangeHistory/tests/ChangeHistory.test.tsx +++ b/frontend/src/container/PipelinePage/Layouts/ChangeHistory/tests/ChangeHistory.test.tsx @@ -9,6 +9,20 @@ import store from 'store'; import ChangeHistory from '../index'; import { pipelineData, pipelineDataHistory } from './testUtils'; +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + const queryClient = new QueryClient({ defaultOptions: { queries: { diff --git a/frontend/src/container/PipelinePage/tests/AddNewPipeline.test.tsx b/frontend/src/container/PipelinePage/tests/AddNewPipeline.test.tsx index 8990ffa4e7..360a7c5925 100644 --- a/frontend/src/container/PipelinePage/tests/AddNewPipeline.test.tsx +++ b/frontend/src/container/PipelinePage/tests/AddNewPipeline.test.tsx @@ -9,6 +9,20 @@ import store from 'store'; import { pipelineMockData } from '../mocks/pipeline'; import AddNewPipeline from '../PipelineListsView/AddNewPipeline'; +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + export function matchMedia(): void { Object.defineProperty(window, 'matchMedia', { writable: true, diff --git a/frontend/src/container/PipelinePage/tests/AddNewProcessor.test.tsx b/frontend/src/container/PipelinePage/tests/AddNewProcessor.test.tsx index a4d2e680a4..d3f236437f 100644 --- a/frontend/src/container/PipelinePage/tests/AddNewProcessor.test.tsx +++ b/frontend/src/container/PipelinePage/tests/AddNewProcessor.test.tsx @@ -9,6 +9,20 @@ import { pipelineMockData } from '../mocks/pipeline'; import AddNewProcessor from '../PipelineListsView/AddNewProcessor'; import { matchMedia } from './AddNewPipeline.test'; +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + beforeAll(() => { matchMedia(); }); diff --git a/frontend/src/container/PipelinePage/tests/DeleteAction.test.tsx b/frontend/src/container/PipelinePage/tests/DeleteAction.test.tsx index 451ef8807f..3b2fdfeb34 100644 --- a/frontend/src/container/PipelinePage/tests/DeleteAction.test.tsx +++ b/frontend/src/container/PipelinePage/tests/DeleteAction.test.tsx @@ -6,6 +6,20 @@ import { MemoryRouter } from 'react-router-dom'; import i18n from 'ReactI18'; import store from 'store'; +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + describe('PipelinePage container test', () => { it('should render DeleteAction section', () => { const { asFragment } = render( diff --git a/frontend/src/container/PipelinePage/tests/DragAction.test.tsx b/frontend/src/container/PipelinePage/tests/DragAction.test.tsx index 168b3f042f..9f64714072 100644 --- a/frontend/src/container/PipelinePage/tests/DragAction.test.tsx +++ b/frontend/src/container/PipelinePage/tests/DragAction.test.tsx @@ -6,6 +6,20 @@ import { MemoryRouter } from 'react-router-dom'; import i18n from 'ReactI18'; import store from 
'store'; +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + describe('PipelinePage container test', () => { it('should render DragAction section', () => { const { asFragment } = render( diff --git a/frontend/src/container/PipelinePage/tests/EditAction.test.tsx b/frontend/src/container/PipelinePage/tests/EditAction.test.tsx index c52991bf6d..56dd779600 100644 --- a/frontend/src/container/PipelinePage/tests/EditAction.test.tsx +++ b/frontend/src/container/PipelinePage/tests/EditAction.test.tsx @@ -6,6 +6,20 @@ import { MemoryRouter } from 'react-router-dom'; import i18n from 'ReactI18'; import store from 'store'; +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + describe('PipelinePage container test', () => { it('should render EditAction section', () => { const { asFragment } = render( diff --git a/frontend/src/container/PipelinePage/tests/PipelineActions.test.tsx b/frontend/src/container/PipelinePage/tests/PipelineActions.test.tsx index 83f503107b..d472f4745c 100644 --- a/frontend/src/container/PipelinePage/tests/PipelineActions.test.tsx +++ b/frontend/src/container/PipelinePage/tests/PipelineActions.test.tsx @@ -8,6 +8,20 @@ import store from 'store'; import { pipelineMockData } from '../mocks/pipeline'; import PipelineActions from '../PipelineListsView/TableComponents/PipelineActions'; +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + describe('PipelinePage container test', () => { it('should render PipelineActions section', () => { const { asFragment } = render( diff --git a/frontend/src/container/PipelinePage/tests/PipelineExpandView.test.tsx b/frontend/src/container/PipelinePage/tests/PipelineExpandView.test.tsx index 6875d11259..b9c78091dd 100644 --- a/frontend/src/container/PipelinePage/tests/PipelineExpandView.test.tsx +++ b/frontend/src/container/PipelinePage/tests/PipelineExpandView.test.tsx @@ -9,6 +9,20 @@ import { pipelineMockData } from '../mocks/pipeline'; import PipelineExpandView from '../PipelineListsView/PipelineExpandView'; import { matchMedia } from './AddNewPipeline.test'; +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + beforeAll(() => { matchMedia(); }); diff --git a/frontend/src/container/PipelinePage/tests/PipelineListsView.test.tsx b/frontend/src/container/PipelinePage/tests/PipelineListsView.test.tsx index 74f1f125e0..517e623ebe 100644 --- a/frontend/src/container/PipelinePage/tests/PipelineListsView.test.tsx +++ b/frontend/src/container/PipelinePage/tests/PipelineListsView.test.tsx @@ -11,6 +11,20 @@ import store from 'store'; import { pipelineApiResponseMockData } from '../mocks/pipeline'; import PipelineListsView from '../PipelineListsView'; +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + const samplePipelinePreviewResponse = { isLoading: false, logs: [ diff --git 
a/frontend/src/container/PipelinePage/tests/PipelinePageLayout.test.tsx b/frontend/src/container/PipelinePage/tests/PipelinePageLayout.test.tsx index 91d5dfe244..a71bc1266d 100644 --- a/frontend/src/container/PipelinePage/tests/PipelinePageLayout.test.tsx +++ b/frontend/src/container/PipelinePage/tests/PipelinePageLayout.test.tsx @@ -11,6 +11,20 @@ import { v4 } from 'uuid'; import PipelinePageLayout from '../Layouts/Pipeline'; import { matchMedia } from './AddNewPipeline.test'; +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + beforeAll(() => { matchMedia(); }); diff --git a/frontend/src/container/PipelinePage/tests/TagInput.test.tsx b/frontend/src/container/PipelinePage/tests/TagInput.test.tsx index 24cedc2eb0..e95efb6715 100644 --- a/frontend/src/container/PipelinePage/tests/TagInput.test.tsx +++ b/frontend/src/container/PipelinePage/tests/TagInput.test.tsx @@ -7,6 +7,20 @@ import store from 'store'; import TagInput from '../components/TagInput'; +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + describe('Pipeline Page', () => { it('should render TagInput section', () => { const { asFragment } = render( diff --git a/frontend/src/container/PipelinePage/tests/utils.test.ts b/frontend/src/container/PipelinePage/tests/utils.test.ts index c21e8c5a4b..707ad06c2d 100644 --- a/frontend/src/container/PipelinePage/tests/utils.test.ts +++ b/frontend/src/container/PipelinePage/tests/utils.test.ts @@ -11,6 +11,20 @@ import { getTableColumn, } from '../PipelineListsView/utils'; +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + describe('Utils testing of Pipeline Page', () => { test('it should be check form field of add pipeline', () => { expect(pipelineFields.length).toBe(3); diff --git a/frontend/src/lib/dashboard/getQueryResults.ts b/frontend/src/lib/dashboard/getQueryResults.ts index 03ad5f8caa..232efad04c 100644 --- a/frontend/src/lib/dashboard/getQueryResults.ts +++ b/frontend/src/lib/dashboard/getQueryResults.ts @@ -26,13 +26,13 @@ export async function GetMetricQueryRange( headers?: Record, ): Promise> { const { legendMap, queryPayload } = prepareQueryRangePayload(props); - const response = await getMetricsQueryRange( queryPayload, version || 'v3', signal, headers, ); + if (response.statusCode >= 400) { let error = `API responded with ${response.statusCode} - ${response.error} status: ${response.message}`; @@ -78,7 +78,7 @@ export interface GetQueryResultsProps { query: Query; graphType: PANEL_TYPES; selectedTime: timePreferenceType; - globalSelectedInterval: Time | TimeV2 | CustomTimeType; + globalSelectedInterval?: Time | TimeV2 | CustomTimeType; variables?: Record; params?: Record; fillGaps?: boolean; @@ -87,4 +87,6 @@ export interface GetQueryResultsProps { pagination?: Pagination; selectColumns?: any; }; + start?: number; + end?: number; } diff --git a/frontend/src/lib/dashboard/prepareQueryRangePayload.ts b/frontend/src/lib/dashboard/prepareQueryRangePayload.ts index ffc2f0477c..4999726c63 100644 --- a/frontend/src/lib/dashboard/prepareQueryRangePayload.ts +++ b/frontend/src/lib/dashboard/prepareQueryRangePayload.ts @@ -23,6 +23,8 @@ export const 
prepareQueryRangePayload = ({ variables = {}, params = {}, fillGaps = false, + start: startTime, + end: endTime, }: GetQueryResultsProps): PrepareQueryRangePayload => { let legendMap: Record = {}; const { @@ -100,8 +102,8 @@ export const prepareQueryRangePayload = ({ : undefined; const queryPayload: QueryRangePayload = { - start: parseInt(start, 10) * 1e3, - end: endLogTimeStamp || parseInt(end, 10) * 1e3, + start: startTime ? startTime * 1e3 : parseInt(start, 10) * 1e3, + end: endTime ? endTime * 1e3 : endLogTimeStamp || parseInt(end, 10) * 1e3, step: getStep({ start: allowSelectedIntervalForStepGen ? start diff --git a/frontend/src/lib/uPlotLib/getUplotChartOptions.ts b/frontend/src/lib/uPlotLib/getUplotChartOptions.ts index 5000b0f0d8..447dd8d716 100644 --- a/frontend/src/lib/uPlotLib/getUplotChartOptions.ts +++ b/frontend/src/lib/uPlotLib/getUplotChartOptions.ts @@ -54,6 +54,7 @@ export interface GetUPlotChartOptions { }> >; customTooltipElement?: HTMLDivElement; + verticalLineTimestamp?: number; } /** the function converts series A , series B , series C to @@ -156,6 +157,7 @@ export const getUPlotChartOptions = ({ hiddenGraph, setHiddenGraph, customTooltipElement, + verticalLineTimestamp, }: GetUPlotChartOptions): uPlot.Options => { const timeScaleProps = getXAxisScale(minTimeScale, maxTimeScale); @@ -222,6 +224,29 @@ export const getUPlotChartOptions = ({ onClick: onClickHandler, apiResponse, }), + { + hooks: { + draw: [ + (u): void => { + if (verticalLineTimestamp) { + const { ctx } = u; + ctx.save(); + ctx.setLineDash([4, 2]); + ctx.strokeStyle = 'white'; + ctx.lineWidth = 1; + const x = u.valToPos(verticalLineTimestamp, 'x', true); + + ctx.beginPath(); + ctx.moveTo(x, u.bbox.top); + ctx.lineTo(x, u.bbox.top + u.bbox.height); + ctx.stroke(); + ctx.setLineDash([]); + ctx.restore(); + } + }, + ], + }, + }, ], hooks: { draw: [ diff --git a/frontend/src/pages/LogsExplorer/__tests__/LogsExplorer.test.tsx b/frontend/src/pages/LogsExplorer/__tests__/LogsExplorer.test.tsx index 4970d6cf17..66bf604fa1 100644 --- a/frontend/src/pages/LogsExplorer/__tests__/LogsExplorer.test.tsx +++ b/frontend/src/pages/LogsExplorer/__tests__/LogsExplorer.test.tsx @@ -26,6 +26,21 @@ import { Query } from 'types/api/queryBuilder/queryBuilderData'; import LogsExplorer from '../index'; const queryRangeURL = 'http://localhost/api/v3/query_range'; + +jest.mock('uplot', () => { + const paths = { + spline: jest.fn(), + bars: jest.fn(), + }; + const uplotMock = jest.fn(() => ({ + paths, + })); + return { + paths, + default: uplotMock, + }; +}); + // mocking the graph components in this test as this should be handled separately jest.mock( 'container/TimeSeriesView/TimeSeriesView', From f9ac41b865ded9c2089b7c40ea26c31c633b4681 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Mon, 23 Sep 2024 09:34:59 +0530 Subject: [PATCH 58/79] feat: enable the search v2 for logs explorer page and remove FF (#5952) --- ee/query-service/model/plans.go | 22 ----------------- .../LogExplorerQuerySection/index.tsx | 24 ++++--------------- .../QueryBuilder/components/Query/Query.tsx | 23 ++++++++++++++---- 3 files changed, 23 insertions(+), 46 deletions(-) diff --git a/ee/query-service/model/plans.go b/ee/query-service/model/plans.go index dbd8b56965..9b696c013f 100644 --- a/ee/query-service/model/plans.go +++ b/ee/query-service/model/plans.go @@ -13,7 +13,6 @@ const Onboarding = "ONBOARDING" const ChatSupport = "CHAT_SUPPORT" const Gateway = "GATEWAY" const PremiumSupport = "PREMIUM_SUPPORT" -const QueryBuilderSearchV2 = 
"QUERY_BUILDER_SEARCH_V2" var BasicPlan = basemodel.FeatureSet{ basemodel.Feature{ @@ -128,13 +127,6 @@ var BasicPlan = basemodel.FeatureSet{ UsageLimit: -1, Route: "", }, - basemodel.Feature{ - Name: QueryBuilderSearchV2, - Active: false, - Usage: 0, - UsageLimit: -1, - Route: "", - }, } var ProPlan = basemodel.FeatureSet{ @@ -243,13 +235,6 @@ var ProPlan = basemodel.FeatureSet{ UsageLimit: -1, Route: "", }, - basemodel.Feature{ - Name: QueryBuilderSearchV2, - Active: false, - Usage: 0, - UsageLimit: -1, - Route: "", - }, } var EnterprisePlan = basemodel.FeatureSet{ @@ -372,11 +357,4 @@ var EnterprisePlan = basemodel.FeatureSet{ UsageLimit: -1, Route: "", }, - basemodel.Feature{ - Name: QueryBuilderSearchV2, - Active: false, - Usage: 0, - UsageLimit: -1, - Route: "", - }, } diff --git a/frontend/src/container/LogExplorerQuerySection/index.tsx b/frontend/src/container/LogExplorerQuerySection/index.tsx index f807103f68..c49990861f 100644 --- a/frontend/src/container/LogExplorerQuerySection/index.tsx +++ b/frontend/src/container/LogExplorerQuerySection/index.tsx @@ -1,6 +1,5 @@ import './LogsExplorerQuerySection.styles.scss'; -import { FeatureKeys } from 'constants/features'; import { initialQueriesMap, OPERATORS, @@ -9,14 +8,12 @@ import { import ExplorerOrderBy from 'container/ExplorerOrderBy'; import { QueryBuilder } from 'container/QueryBuilder'; import { OrderByFilterProps } from 'container/QueryBuilder/filters/OrderByFilter/OrderByFilter.interfaces'; -import QueryBuilderSearch from 'container/QueryBuilder/filters/QueryBuilderSearch'; import QueryBuilderSearchV2 from 'container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2'; import { QueryBuilderProps } from 'container/QueryBuilder/QueryBuilder.interfaces'; import { useGetPanelTypesQueryParam } from 'hooks/queryBuilder/useGetPanelTypesQueryParam'; import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder'; import { useQueryOperations } from 'hooks/queryBuilder/useQueryBuilderOperations'; import { useShareBuilderUrl } from 'hooks/queryBuilder/useShareBuilderUrl'; -import useFeatureFlags from 'hooks/useFeatureFlag'; import { prepareQueryWithDefaultTimestamp, SELECTED_VIEWS, @@ -89,26 +86,15 @@ function LogExplorerQuerySection({ [handleChangeQueryData], ); - const isSearchV2Enabled = - useFeatureFlags(FeatureKeys.QUERY_BUILDER_SEARCH_V2)?.active || false; - return ( <> {selectedView === SELECTED_VIEWS.SEARCH && (
- {isSearchV2Enabled ? ( - - ) : ( - - )} +
)} diff --git a/frontend/src/container/QueryBuilder/components/Query/Query.tsx b/frontend/src/container/QueryBuilder/components/Query/Query.tsx index 747198abfb..453cf063f8 100644 --- a/frontend/src/container/QueryBuilder/components/Query/Query.tsx +++ b/frontend/src/container/QueryBuilder/components/Query/Query.tsx @@ -23,6 +23,7 @@ import { import AggregateEveryFilter from 'container/QueryBuilder/filters/AggregateEveryFilter'; import LimitFilter from 'container/QueryBuilder/filters/LimitFilter/LimitFilter'; import QueryBuilderSearch from 'container/QueryBuilder/filters/QueryBuilderSearch'; +import QueryBuilderSearchV2 from 'container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2'; import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder'; import { useQueryOperations } from 'hooks/queryBuilder/useQueryBuilderOperations'; // ** Hooks @@ -81,6 +82,10 @@ export const Query = memo(function Query({ entityVersion: version, }); + const isLogsExplorerPage = useMemo(() => pathname === ROUTES.LOGS_EXPLORER, [ + pathname, + ]); + const handleChangeAggregateEvery = useCallback( (value: IBuilderQuery['stepInterval']) => { handleChangeQueryData('stepInterval', value); @@ -452,11 +457,19 @@ export const Query = memo(function Query({ )}
- + {isLogsExplorerPage ? ( + + ) : ( + + )} From 3866f89d3ebbdcc4da5ce333ee97acb51aa22700 Mon Sep 17 00:00:00 2001 From: Nityananda Gohain Date: Mon, 23 Sep 2024 12:27:14 +0530 Subject: [PATCH 59/79] feat: support for case insensitive for contains and like queries (#6045) * feat: support for case insensitive for contains and like queries * fix: make index filter lowercase for like and not like --- .../app/logs/v4/query_builder.go | 10 +-- .../app/logs/v4/query_builder_test.go | 6 +- .../app/logs/v4/resource_query_builder.go | 64 +++++++++++++++---- .../logs/v4/resource_query_builder_test.go | 29 ++++++--- 4 files changed, 78 insertions(+), 31 deletions(-) diff --git a/pkg/query-service/app/logs/v4/query_builder.go b/pkg/query-service/app/logs/v4/query_builder.go index e906c605a1..49e585e64b 100644 --- a/pkg/query-service/app/logs/v4/query_builder.go +++ b/pkg/query-service/app/logs/v4/query_builder.go @@ -17,10 +17,10 @@ var logOperators = map[v3.FilterOperator]string{ v3.FilterOperatorLessThanOrEq: "<=", v3.FilterOperatorGreaterThan: ">", v3.FilterOperatorGreaterThanOrEq: ">=", - v3.FilterOperatorLike: "LIKE", - v3.FilterOperatorNotLike: "NOT LIKE", - v3.FilterOperatorContains: "LIKE", - v3.FilterOperatorNotContains: "NOT LIKE", + v3.FilterOperatorLike: "ILIKE", + v3.FilterOperatorNotLike: "NOT ILIKE", + v3.FilterOperatorContains: "ILIKE", + v3.FilterOperatorNotContains: "NOT ILIKE", v3.FilterOperatorRegex: "match(%s, %s)", v3.FilterOperatorNotRegex: "NOT match(%s, %s)", v3.FilterOperatorIn: "IN", @@ -150,6 +150,7 @@ func buildAttributeFilter(item v3.FilterItem) (string, error) { val := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", item.Value)) // for body the contains is case insensitive if keyName == BODY { + logsOp = strings.Replace(logsOp, "ILIKE", "LIKE", 1) // removing i from ilike and not ilike return fmt.Sprintf("lower(%s) %s lower('%%%s%%')", keyName, logsOp, val), nil } else { return fmt.Sprintf("%s %s '%%%s%%'", keyName, logsOp, val), nil @@ -158,6 +159,7 @@ func buildAttributeFilter(item v3.FilterItem) (string, error) { // for body use lower for like and ilike val := utils.QuoteEscapedString(fmt.Sprintf("%s", item.Value)) if keyName == BODY { + logsOp = strings.Replace(logsOp, "ILIKE", "LIKE", 1) // removing i from ilike and not ilike return fmt.Sprintf("lower(%s) %s lower('%s')", keyName, logsOp, val), nil } else { return fmt.Sprintf("%s %s '%s'", keyName, logsOp, val), nil diff --git a/pkg/query-service/app/logs/v4/query_builder_test.go b/pkg/query-service/app/logs/v4/query_builder_test.go index 34ea7e1f6f..9c2b1fd2e5 100644 --- a/pkg/query-service/app/logs/v4/query_builder_test.go +++ b/pkg/query-service/app/logs/v4/query_builder_test.go @@ -250,7 +250,7 @@ func Test_buildAttributeFilter(t *testing.T) { Value: "test", }, }, - want: "resources_string['service.name'] LIKE '%test%'", + want: "resources_string['service.name'] ILIKE '%test%'", }, { name: "build attribute filter contains- body", @@ -280,7 +280,7 @@ func Test_buildAttributeFilter(t *testing.T) { Value: "test%", }, }, - want: "resources_string['service.name'] LIKE 'test%'", + want: "resources_string['service.name'] ILIKE 'test%'", }, { name: "build attribute filter like-body", @@ -956,7 +956,7 @@ func TestPrepareLogsQuery(t *testing.T) { }, want: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, body, attributes_string, attributes_number, attributes_bool, resources_string from " + "signoz_logs.distributed_logs_v2 where attributes_string['method'] = 'GET' AND 
mapContains(attributes_string, 'method') AND " +
- "(resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE simpleJSONExtractString(labels, 'service.name') LIKE '%app%' AND labels like '%service.name%app%' AND ",
+ "(resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE simpleJSONExtractString(lower(labels), 'service.name') LIKE '%app%' AND lower(labels) like '%service.name%app%' AND ",
},
{
name: "Live Tail Query W/O filter",
diff --git a/pkg/query-service/app/logs/v4/resource_query_builder.go b/pkg/query-service/app/logs/v4/resource_query_builder.go
index 2a56549b43..3f7f2682cb 100644
--- a/pkg/query-service/app/logs/v4/resource_query_builder.go
+++ b/pkg/query-service/app/logs/v4/resource_query_builder.go
@@ -8,12 +8,37 @@ import (
"go.signoz.io/signoz/pkg/query-service/utils"
)
+var resourceLogOperators = map[v3.FilterOperator]string{
+ v3.FilterOperatorEqual: "=",
+ v3.FilterOperatorNotEqual: "!=",
+ v3.FilterOperatorLessThan: "<",
+ v3.FilterOperatorLessThanOrEq: "<=",
+ v3.FilterOperatorGreaterThan: ">",
+ v3.FilterOperatorGreaterThanOrEq: ">=",
+ v3.FilterOperatorLike: "LIKE",
+ v3.FilterOperatorNotLike: "NOT LIKE",
+ v3.FilterOperatorContains: "LIKE",
+ v3.FilterOperatorNotContains: "NOT LIKE",
+ v3.FilterOperatorRegex: "match(%s, %s)",
+ v3.FilterOperatorNotRegex: "NOT match(%s, %s)",
+ v3.FilterOperatorIn: "IN",
+ v3.FilterOperatorNotIn: "NOT IN",
+ v3.FilterOperatorExists: "mapContains(%s_%s, '%s')",
+ v3.FilterOperatorNotExists: "not mapContains(%s_%s, '%s')",
+}
+
// buildResourceFilter builds a clickhouse filter string for resource labels
func buildResourceFilter(logsOp string, key string, op v3.FilterOperator, value interface{}) string {
+ // for all operators except contains and like
searchKey := fmt.Sprintf("simpleJSONExtractString(labels, '%s')", key)
+ // for contains and like it will be case insensitive
+ lowerSearchKey := fmt.Sprintf("simpleJSONExtractString(lower(labels), '%s')", key)
+
chFmtVal := utils.ClickHouseFormattedValue(value)
+ lowerValue := strings.ToLower(fmt.Sprintf("%s", value))
+
switch op {
case v3.FilterOperatorExists:
return fmt.Sprintf("simpleJSONHas(labels, '%s')", key)
@@ -24,20 +49,20 @@ func buildResourceFilter(logsOp string, key string, op v3.FilterOperator, value
case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
// this is required as clickhouseFormattedValue adds quotes to the string
// we also want to treat %, _ as literals for contains
- escapedStringValue := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", value))
- return fmt.Sprintf("%s %s '%%%s%%'", searchKey, logsOp, escapedStringValue)
+ escapedStringValue := utils.QuoteEscapedStringForContains(lowerValue)
+ return fmt.Sprintf("%s %s '%%%s%%'", lowerSearchKey, logsOp, escapedStringValue)
case v3.FilterOperatorLike, v3.FilterOperatorNotLike:
// this is required as clickhouseFormattedValue adds quotes to the string
- escapedStringValue := utils.QuoteEscapedString(fmt.Sprintf("%s", value))
- return fmt.Sprintf("%s %s '%s'", searchKey, logsOp, escapedStringValue)
+ escapedStringValue := utils.QuoteEscapedString(lowerValue)
+ return fmt.Sprintf("%s %s '%s'", lowerSearchKey, logsOp, escapedStringValue)
default:
return fmt.Sprintf("%s %s %s", searchKey, logsOp, chFmtVal)
}
}
// buildIndexFilterForInOperator builds a clickhouse filter string for in operator
-// example:= x in a,b,c = (labels like '%x%a%' or labels like '%"x":"b"%' or labels like '%"x"="c"%')
-// example:= x nin
a,b,c = (labels nlike '%x%a%' AND labels nlike '%"x"="b"' AND labels nlike '%"x"="c"%')
+// example:= x in a,b,c = (labels like '%"x"%"a"%' or labels like '%"x":"b"%' or labels like '%"x"="c"%')
+// example:= x nin a,b,c = (labels nlike '%"x"%"a"%' AND labels nlike '%"x"="b"' AND labels nlike '%"x"="c"%')
func buildIndexFilterForInOperator(key string, op v3.FilterOperator, value interface{}) string {
conditions := []string{}
separator := " OR "
@@ -77,24 +102,35 @@ func buildIndexFilterForInOperator(key string, op v3.FilterOperator, value inter
// buildResourceIndexFilter builds a clickhouse filter string for resource labels
// example:= x like '%john%' = labels like '%x%john%'
+// we have two indexes for resource attributes: one lowercased and one normal.
+// for all operators other than like/contains we will use the normal index
+// for like/contains we will use the lower index
+// we could use the lower index for =, IN etc. as well, but it's difficult to do for !=, NOT IN etc.
+// e.g. for x != "ABC" we cannot emit something like "not lower(labels) like '%%x%%abc%%'"; it has to be "not lower(labels) like '%%x%%ABC%%'"
func buildResourceIndexFilter(key string, op v3.FilterOperator, value interface{}) string {
// not using clickhouseFormattedValue as we don't want the quotes
strVal := fmt.Sprintf("%s", value)
- formattedValueEscapedForContains := utils.QuoteEscapedStringForContains(strVal)
+ formattedValueEscapedForContains := strings.ToLower(utils.QuoteEscapedStringForContains(strVal))
formattedValueEscaped := utils.QuoteEscapedString(strVal)
+ formattedValueEscapedLower := strings.ToLower(formattedValueEscaped)
// add index filters
switch op {
case v3.FilterOperatorContains:
- return fmt.Sprintf("labels like '%%%s%%%s%%'", key, formattedValueEscapedForContains)
+ return fmt.Sprintf("lower(labels) like '%%%s%%%s%%'", key, formattedValueEscapedForContains)
case v3.FilterOperatorNotContains:
- return fmt.Sprintf("labels not like '%%%s%%%s%%'", key, formattedValueEscapedForContains)
+ return fmt.Sprintf("lower(labels) not like '%%%s%%%s%%'", key, formattedValueEscapedForContains)
- case v3.FilterOperatorLike, v3.FilterOperatorEqual:
+ case v3.FilterOperatorLike:
+ return fmt.Sprintf("lower(labels) like '%%%s%%%s%%'", key, formattedValueEscapedLower)
+ case v3.FilterOperatorNotLike:
+ return fmt.Sprintf("lower(labels) not like '%%%s%%%s%%'", key, formattedValueEscapedLower)
+ case v3.FilterOperatorEqual:
return fmt.Sprintf("labels like '%%%s%%%s%%'", key, formattedValueEscaped)
- case v3.FilterOperatorNotLike, v3.FilterOperatorNotEqual:
+ case v3.FilterOperatorNotEqual:
return fmt.Sprintf("labels not like '%%%s%%%s%%'", key, formattedValueEscaped)
- case v3.FilterOperatorNotRegex:
- return fmt.Sprintf("labels not like '%%%s%%'", key)
+ case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex:
+ // don't try to do anything for regex.
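+ // (no safe LIKE-based index prefilter can be derived from an arbitrary regex
+ // pattern, so an empty string is returned here and only the main resource
+ // filter is used; callers skip empty index filters)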
+ return "" case v3.FilterOperatorIn, v3.FilterOperatorNotIn: return buildIndexFilterForInOperator(key, op, value) default: @@ -137,7 +173,7 @@ func buildResourceFiltersFromFilterItems(fs *v3.FilterSet) ([]string, error) { } } - if logsOp, ok := logOperators[op]; ok { + if logsOp, ok := resourceLogOperators[op]; ok { // the filter if resourceFilter := buildResourceFilter(logsOp, keyName, op, value); resourceFilter != "" { conditions = append(conditions, resourceFilter) diff --git a/pkg/query-service/app/logs/v4/resource_query_builder_test.go b/pkg/query-service/app/logs/v4/resource_query_builder_test.go index e315f739a3..7bea0bc291 100644 --- a/pkg/query-service/app/logs/v4/resource_query_builder_test.go +++ b/pkg/query-service/app/logs/v4/resource_query_builder_test.go @@ -53,7 +53,7 @@ func Test_buildResourceFilter(t *testing.T) { op: v3.FilterOperatorContains, value: "Application%_", }, - want: `simpleJSONExtractString(labels, 'service.name') LIKE '%Application\%\_%'`, + want: `simpleJSONExtractString(lower(labels), 'service.name') LIKE '%application\%\_%'`, }, { name: "test eq", @@ -83,7 +83,7 @@ func Test_buildResourceFilter(t *testing.T) { op: v3.FilterOperatorLike, value: "Application%_", }, - want: `simpleJSONExtractString(labels, 'service.name') LIKE 'Application%_'`, + want: `simpleJSONExtractString(lower(labels), 'service.name') LIKE 'application%_'`, }, } for _, tt := range tests { @@ -170,7 +170,7 @@ func Test_buildResourceIndexFilter(t *testing.T) { op: v3.FilterOperatorContains, value: "application", }, - want: `labels like '%service.name%application%'`, + want: `lower(labels) like '%service.name%application%'`, }, { name: "test not contains", @@ -179,7 +179,7 @@ func Test_buildResourceIndexFilter(t *testing.T) { op: v3.FilterOperatorNotContains, value: "application", }, - want: `labels not like '%service.name%application%'`, + want: `lower(labels) not like '%service.name%application%'`, }, { name: "test contains with % and _", @@ -188,7 +188,16 @@ func Test_buildResourceIndexFilter(t *testing.T) { op: v3.FilterOperatorNotContains, value: "application%_test", }, - want: `labels not like '%service.name%application\%\_test%'`, + want: `lower(labels) not like '%service.name%application\%\_test%'`, + }, + { + name: "test like with % and _", + args: args{ + key: "service.name", + op: v3.FilterOperatorLike, + value: "Application%_test", + }, + want: `lower(labels) like '%service.name%application%_test%'`, }, { name: "test like with % and _", @@ -197,7 +206,7 @@ func Test_buildResourceIndexFilter(t *testing.T) { op: v3.FilterOperatorLike, value: "application%_test", }, - want: `labels like '%service.name%application%_test%'`, + want: `lower(labels) like '%service.name%application%_test%'`, }, { name: "test not regex", @@ -206,7 +215,7 @@ func Test_buildResourceIndexFilter(t *testing.T) { op: v3.FilterOperatorNotRegex, value: ".*", }, - want: `labels not like '%service.name%'`, + want: ``, }, { name: "test in", @@ -318,8 +327,8 @@ func Test_buildResourceFiltersFromFilterItems(t *testing.T) { want: []string{ "simpleJSONExtractString(labels, 'service.name') = 'test'", "labels like '%service.name%test%'", - "simpleJSONExtractString(labels, 'namespace') LIKE '%test1%'", - "labels like '%namespace%test1%'", + "simpleJSONExtractString(lower(labels), 'namespace') LIKE '%test1%'", + "lower(labels) like '%namespace%test1%'", }, wantErr: false, }, @@ -480,7 +489,7 @@ func Test_buildResourceSubQuery(t *testing.T) { want: "(SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE 
" + "(seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) AND " + "simpleJSONExtractString(labels, 'service.name') = 'test' AND labels like '%service.name%test%' " + - "AND simpleJSONExtractString(labels, 'namespace') LIKE '%test1%' AND labels like '%namespace%test1%' " + + "AND simpleJSONExtractString(lower(labels), 'namespace') LIKE '%test1%' AND lower(labels) like '%namespace%test1%' " + "AND (simpleJSONHas(labels, 'cluster.name') AND labels like '%cluster.name%') AND " + "( (simpleJSONHas(labels, 'host.name') AND labels like '%host.name%') ))", wantErr: false, From f69aaa2cfb778e7b3631c78b4d8813bc5504935b Mon Sep 17 00:00:00 2001 From: Nityananda Gohain Date: Mon, 23 Sep 2024 13:15:21 +0530 Subject: [PATCH 60/79] fix: telemetry nil pointer error fix (#6051) --- pkg/query-service/telemetry/telemetry.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pkg/query-service/telemetry/telemetry.go b/pkg/query-service/telemetry/telemetry.go index 62ff020281..972caa4679 100644 --- a/pkg/query-service/telemetry/telemetry.go +++ b/pkg/query-service/telemetry/telemetry.go @@ -12,6 +12,7 @@ import ( "time" "github.com/go-co-op/gocron" + "go.uber.org/zap" "gopkg.in/segmentio/analytics-go.v3" "go.signoz.io/signoz/pkg/query-service/constants" @@ -258,7 +259,11 @@ func createTelemetry() { ctx := context.Background() // Define heartbeat function heartbeatFunc := func() { - tagsInfo, _ := telemetry.reader.GetTagsInfoInLastHeartBeatInterval(ctx, HEART_BEAT_DURATION) + tagsInfo, err := telemetry.reader.GetTagsInfoInLastHeartBeatInterval(ctx, HEART_BEAT_DURATION) + if err != nil { + zap.L().Error("heartbeatFunc: failed to get tags info", zap.Error(err)) + return + } if len(tagsInfo.Env) != 0 { telemetry.SendEvent(TELEMETRY_EVENT_ENVIRONMENT, map[string]interface{}{"value": tagsInfo.Env}, "", true, false) From a6b05f0a3d39912bb8df699984cdcc90158ae056 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Mon, 23 Sep 2024 15:50:46 +0530 Subject: [PATCH 61/79] fix: light mode design for qb v2 (#6052) --- .../QueryBuilderSearchV2.styles.scss | 111 ++++++++++++++++++ .../Suggestions.styles.scss | 56 +++++++++ 2 files changed, 167 insertions(+) diff --git a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.styles.scss b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.styles.scss index 1ca8bd7529..7db18f400e 100644 --- a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.styles.scss +++ b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.styles.scss @@ -2,6 +2,10 @@ display: flex; gap: 4px; + .ant-select-dropdown { + padding: 0px; + } + .show-all-filters { .content { .rc-virtual-list-holder { @@ -259,3 +263,110 @@ } } } + +.lightMode { + .query-builder-search-v2 { + .content { + .operator-for { + .operator-for-text { + color: var(--bg-ink-200); + } + + .operator-for-value { + background: rgba(255, 255, 255, 0.1); + color: var(--bg-ink-200); + } + } + + .value-for { + .value-for-text { + color: var(--bg-ink-200); + } + + .value-for-value { + background: rgba(255, 255, 255, 0.1); + color: var(--bg-ink-400); + } + } + .example-queries { + cursor: default; + .heading { + color: var(--bg-slate-50); + } + + .query-container { + .example-query { + background: var(--bg-vanilla-200); + color: var(--bg-ink-400); + } + + .example-query:hover { + color: var(--bg-ink-100); + } + } + } + } + + .keyboard-shortcuts { + border: 1px solid 
var(--bg-vanilla-300); + background: var(--bg-vanilla-200); + + .icons { + border-top: 1.143px solid var(--bg-ink-200); + border-right: 1.143px solid var(--bg-ink-200); + border-bottom: 2.286px solid var(--bg-ink-200); + border-left: 1.143px solid var(--bg-ink-200); + background: var(--bg-vanilla-300); + } + + .keyboard-text { + color: var(--bg-ink-400); + } + + .navigate { + border-right: 1px solid #1d212d; + } + + .show-all-filter-items { + border-left: 1px solid #1d212d; + } + } + + .qb-search-bar-tokenised-tags { + .ant-tag { + border: 1px solid var(--bg-slate-100); + background: var(--bg-vanilla-300); + box-shadow: 0px 0px 8px 0px rgba(0, 0, 0, 0.1); + + .ant-typography { + color: var(--bg-ink-100); + } + + &.resource { + border: 1px solid rgba(242, 71, 105, 0.2); + + .ant-typography { + color: var(--bg-sakura-400); + background: rgba(245, 108, 135, 0.1); + } + + .ant-tag-close-icon { + background: rgba(245, 108, 135, 0.1); + } + } + &.tag { + border: 1px solid rgba(189, 153, 121, 0.2); + + .ant-typography { + color: var(--bg-sienna-400); + background: rgba(189, 153, 121, 0.1); + } + + .ant-tag-close-icon { + background: rgba(189, 153, 121, 0.1); + } + } + } + } + } +} diff --git a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/Suggestions.styles.scss b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/Suggestions.styles.scss index bd7ad36a5a..9fb2f50e55 100644 --- a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/Suggestions.styles.scss +++ b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/Suggestions.styles.scss @@ -168,3 +168,59 @@ } } } + +.lightMode { + .text { + color: var(--bg-ink-400); + } + + .option { + .container { + display: flex; + align-items: center; + justify-content: space-between; + + .right-section { + .data-type { + background: var(--bg-vanilla-300); + } + } + .option-meta-data-container { + display: flex; + gap: 8px; + } + } + + .container-without-tag { + .left { + .OPERATOR { + color: var(--bg-ink-400); + } + + .VALUE { + color: var(--bg-ink-400); + } + } + + .right { + .data-type { + background: var(--bg-vanilla-300); + } + } + } + } + .option:hover { + .container { + .left-section { + .value { + color: var(--bg-ink-100); + } + } + } + .container-without-tag { + .value { + color: var(--bg-ink-100); + } + } + } +} From 5e5f0f167fb0b2b38ded373f99faef7edcefc1ab Mon Sep 17 00:00:00 2001 From: Nityananda Gohain Date: Mon, 23 Sep 2024 20:12:38 +0530 Subject: [PATCH 62/79] TTL API for logs V2 (#5926) * feat: ttl api for logs * fix: add comments * fix: add materialize_ttl_after_modify --------- Co-authored-by: Srikanth Chekuri --- .../app/clickhouseReader/reader.go | 108 +++++++++++++++++- 1 file changed, 107 insertions(+), 1 deletion(-) diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index bb2a84b487..8dd29856fb 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -1996,6 +1996,108 @@ func getLocalTableName(tableName string) string { } +func (r *ClickHouseReader) SetTTLLogsV2(ctx context.Context, params *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) { + // Keep only latest 100 transactions/requests + r.deleteTtlTransactions(ctx, 100) + // uuid is used as transaction id + uuidWithHyphen := uuid.New() + uuid := strings.Replace(uuidWithHyphen.String(), "-", "", -1) + + coldStorageDuration := -1 + if len(params.ColdStorageVolume) > 0 { + coldStorageDuration = 
int(params.ToColdStorageDuration)
+ }
+
+ tableNameArray := []string{r.logsDB + "." + r.logsLocalTableV2, r.logsDB + "." + r.logsResourceLocalTableV2}
+
+ // check if a TTL update is already pending for any of the tables
+ for _, tableName := range tableNameArray {
+ statusItem, err := r.checkTTLStatusItem(ctx, tableName)
+ if err != nil {
+ return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
+ }
+ if statusItem.Status == constants.StatusPending {
+ return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
+ }
+ }
+
+ // TTL query for logs_v2 table
+ ttlLogsV2 := fmt.Sprintf(
+ "ALTER TABLE %v ON CLUSTER %s MODIFY TTL toDateTime(timestamp / 1000000000) + "+
+ "INTERVAL %v SECOND DELETE", tableNameArray[0], r.cluster, params.DelDuration)
+ if len(params.ColdStorageVolume) > 0 {
+ ttlLogsV2 += fmt.Sprintf(", toDateTime(timestamp / 1000000000)"+
+ " + INTERVAL %v SECOND TO VOLUME '%s'",
+ params.ToColdStorageDuration, params.ColdStorageVolume)
+ }
+
+ // TTL query for logs_v2_resource table
+ // adding 1800 as our bucket size is 1800 seconds
+ ttlLogsV2Resource := fmt.Sprintf(
+ "ALTER TABLE %v ON CLUSTER %s MODIFY TTL toDateTime(seen_at_ts_bucket_start) + toIntervalSecond(1800) + "+
+ "INTERVAL %v SECOND DELETE", tableNameArray[1], r.cluster, params.DelDuration)
+ if len(params.ColdStorageVolume) > 0 {
+ ttlLogsV2Resource += fmt.Sprintf(", toDateTime(seen_at_ts_bucket_start) + toIntervalSecond(1800) + "+
+ "INTERVAL %v SECOND TO VOLUME '%s'",
+ params.ToColdStorageDuration, params.ColdStorageVolume)
+ }
+
+ ttlPayload := map[string]string{
+ tableNameArray[0]: ttlLogsV2,
+ tableNameArray[1]: ttlLogsV2Resource,
+ }
+
+ // set the ttl only if nothing is pending and there are no errors
+ go func(ttlPayload map[string]string) {
+ for tableName, query := range ttlPayload {
+ // https://github.com/SigNoz/signoz/issues/5470
+ // we will change ttl for only the new parts and not the old ones
+ query += " SETTINGS materialize_ttl_after_modify=0"
+
+ _, dbErr := r.localDB.Exec("INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)", uuid, time.Now(), time.Now(), tableName, params.DelDuration, constants.StatusPending, coldStorageDuration)
+ if dbErr != nil {
+ zap.L().Error("error in inserting to ttl_status table", zap.Error(dbErr))
+ return
+ }
+
+ err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
+ if err != nil {
+ zap.L().Error("error in setting cold storage", zap.Error(err))
+ statusItem, err := r.checkTTLStatusItem(ctx, tableName)
+ if err == nil {
+ _, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
+ if dbErr != nil {
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
+ return
+ }
+ }
+ return
+ }
+ zap.L().Info("Executing TTL request: ", zap.String("request", query))
+ statusItem, _ := r.checkTTLStatusItem(ctx, tableName)
+ if err := r.db.Exec(ctx, query); err != nil {
+ zap.L().Error("error while setting ttl", zap.Error(err))
+ _, dbErr := r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?", time.Now(), constants.StatusFailed, statusItem.Id)
+ if dbErr != nil {
+ zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr))
+ return
+ }
+ return
+ }
+ _, dbErr = r.localDB.Exec("UPDATE ttl_status SET updated_at = ?, status = ?
WHERE id = ?", time.Now(), constants.StatusSuccess, statusItem.Id) + if dbErr != nil { + zap.L().Error("Error in processing ttl_status update sql query", zap.Error(dbErr)) + return + } + } + + }(ttlPayload) + return &model.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil +} + +// SetTTL sets the TTL for traces or metrics or logs tables. +// This is an async API which creates goroutines to set TTL. +// Status of TTL update is tracked with ttl_status table in sqlite db. // SetTTL sets the TTL for traces or metrics or logs tables. // This is an async API which creates goroutines to set TTL. // Status of TTL update is tracked with ttl_status table in sqlite db. @@ -2139,6 +2241,10 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, go metricTTL(tableName) } case constants.LogsTTL: + if r.useLogsNewSchema { + return r.SetTTLLogsV2(ctx, params) + } + tableName := r.logsDB + "." + r.logsLocalTable statusItem, err := r.checkTTLStatusItem(ctx, tableName) if err != nil { @@ -2372,7 +2478,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, ttlParams *model.GetTTLPa getLogsTTL := func() (*model.DBResponseTTL, *model.ApiError) { var dbResp []model.DBResponseTTL - query := fmt.Sprintf("SELECT engine_full FROM system.tables WHERE name='%v' AND database='%v'", r.logsLocalTable, r.logsDB) + query := fmt.Sprintf("SELECT engine_full FROM system.tables WHERE name='%v' AND database='%v'", r.logsLocalTableName, r.logsDB) err := r.db.Select(ctx, &dbResp, query) From df2844ea7451804885826b93a92a5513f58ec760 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Tue, 24 Sep 2024 09:08:24 +0530 Subject: [PATCH 63/79] fix: the tag key flickering when moving from traces to logs (#6054) --- .../QueryBuilderSearchV2.styles.scss | 16 ++++++++-------- .../QueryBuilderSearchV2.tsx | 5 +++-- .../QueryBuilderSearchV2/Suggestions.styles.scss | 8 ++++---- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.styles.scss b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.styles.scss index 7db18f400e..624546fed5 100644 --- a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.styles.scss +++ b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.styles.scss @@ -235,16 +235,16 @@ } &.resource { - border: 1px solid rgba(242, 71, 105, 0.2); + border: 1px solid #4bcff920; .ant-typography { - color: var(--bg-sakura-400); - background: rgba(245, 108, 135, 0.1); + color: var(--bg-aqua-400); + background: #4bcff910; font-size: 14px; } .ant-tag-close-icon { - background: rgba(245, 108, 135, 0.1); + background: #4bcff910; } } &.tag { @@ -343,15 +343,15 @@ } &.resource { - border: 1px solid rgba(242, 71, 105, 0.2); + border: 1px solid #4bcff920; .ant-typography { - color: var(--bg-sakura-400); - background: rgba(245, 108, 135, 0.1); + color: var(--bg-aqua-400); + background: #4bcff910; } .ant-tag-close-icon { - background: rgba(245, 108, 135, 0.1); + background: #4bcff910; } } &.tag { diff --git a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.tsx b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.tsx index 3ddeef85bc..3d3fca4654 100644 --- a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.tsx +++ b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.tsx @@ 
-729,7 +729,8 @@ function QueryBuilderSearchV2( }, [tags]); useEffect(() => { - if (!isEqual(query.filters.items, tags)) { + // convert the query and tags to same format before comparison + if (!isEqual(getInitTags(query), tags)) { setTags(getInitTags(query)); } // eslint-disable-next-line react-hooks/exhaustive-deps @@ -769,7 +770,7 @@ function QueryBuilderSearchV2( ); const queryTags = useMemo( - () => tags.map((tag) => `${tag.key.key} ${tag.op} ${tag.value}`), + () => tags.map((tag) => `${tag.key?.key} ${tag.op} ${tag.value}`), [tags], ); diff --git a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/Suggestions.styles.scss b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/Suggestions.styles.scss index 9fb2f50e55..153f32e5ee 100644 --- a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/Suggestions.styles.scss +++ b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/Suggestions.styles.scss @@ -77,14 +77,14 @@ &.resource { border-radius: 50px; - background: rgba(245, 108, 135, 0.1) !important; - color: var(--bg-sakura-400) !important; + background: #4bcff910 !important; + color: var(--bg-aqua-400) !important; .dot { - background-color: var(--bg-sakura-400); + background-color: var(--bg-aqua-400); } .text { - color: var(--bg-sakura-400); + color: var(--bg-aqua-400); font-family: Inter; font-size: 12px; font-style: normal; From 419d2da363dd01f66b5c24f0e78ff863605b9a2a Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Tue, 24 Sep 2024 10:22:52 +0530 Subject: [PATCH 64/79] feat: add anomaly rule (#5973) --- ee/query-service/anomaly/params.go | 4 + ee/query-service/anomaly/seasonal.go | 20 +- ee/query-service/app/api/api.go | 2 + ee/query-service/app/api/queryrange.go | 119 ++++++ ee/query-service/app/server.go | 20 +- ee/query-service/model/plans.go | 21 + ee/query-service/rules/anomaly.go | 393 ++++++++++++++++++ ee/query-service/rules/manager.go | 19 + pkg/query-service/app/http_handler.go | 6 +- pkg/query-service/app/server.go | 24 +- pkg/query-service/common/query_range.go | 19 + pkg/query-service/model/featureSet.go | 8 + pkg/query-service/rules/alerting.go | 32 +- pkg/query-service/rules/api_params.go | 4 +- pkg/query-service/rules/base_rule.go | 120 +++++- pkg/query-service/rules/manager.go | 12 +- pkg/query-service/rules/prom_rule.go | 14 +- pkg/query-service/rules/prom_rule_task.go | 4 +- pkg/query-service/rules/promrule_test.go | 2 +- pkg/query-service/rules/rule.go | 3 + pkg/query-service/rules/rule_task.go | 4 +- pkg/query-service/rules/threshold_rule.go | 104 +---- .../rules/threshold_rule_test.go | 25 +- 23 files changed, 799 insertions(+), 180 deletions(-) create mode 100644 ee/query-service/app/api/queryrange.go create mode 100644 ee/query-service/rules/anomaly.go diff --git a/ee/query-service/anomaly/params.go b/ee/query-service/anomaly/params.go index d39b2fa80f..8340a2673a 100644 --- a/ee/query-service/anomaly/params.go +++ b/ee/query-service/anomaly/params.go @@ -16,6 +16,10 @@ const ( SeasonalityWeekly Seasonality = "weekly" ) +func (s Seasonality) String() string { + return string(s) +} + var ( oneWeekOffset = 24 * 7 * time.Hour.Milliseconds() oneDayOffset = 24 * time.Hour.Milliseconds() diff --git a/ee/query-service/anomaly/seasonal.go b/ee/query-service/anomaly/seasonal.go index 485ab7f460..9b5f33d3df 100644 --- a/ee/query-service/anomaly/seasonal.go +++ b/ee/query-service/anomaly/seasonal.go @@ -67,6 +67,7 @@ func (p *BaseSeasonalProvider) getQueryParams(req *GetAnomaliesRequest) *anomaly } func (p 
*BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQueryParams) (*anomalyQueryResults, error) { + zap.L().Info("fetching results for current period", zap.Any("currentPeriodQuery", params.CurrentPeriodQuery)) currentPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentPeriodQuery) if err != nil { return nil, err @@ -77,6 +78,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu return nil, err } + zap.L().Info("fetching results for past period", zap.Any("pastPeriodQuery", params.PastPeriodQuery)) pastPeriodResults, _, err := p.querierV2.QueryRange(ctx, params.PastPeriodQuery) if err != nil { return nil, err @@ -87,6 +89,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu return nil, err } + zap.L().Info("fetching results for current season", zap.Any("currentSeasonQuery", params.CurrentSeasonQuery)) currentSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.CurrentSeasonQuery) if err != nil { return nil, err @@ -97,6 +100,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu return nil, err } + zap.L().Info("fetching results for past season", zap.Any("pastSeasonQuery", params.PastSeasonQuery)) pastSeasonResults, _, err := p.querierV2.QueryRange(ctx, params.PastSeasonQuery) if err != nil { return nil, err @@ -107,6 +111,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu return nil, err } + zap.L().Info("fetching results for past 2 season", zap.Any("past2SeasonQuery", params.Past2SeasonQuery)) past2SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past2SeasonQuery) if err != nil { return nil, err @@ -117,6 +122,7 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, params *anomalyQu return nil, err } + zap.L().Info("fetching results for past 3 season", zap.Any("past3SeasonQuery", params.Past3SeasonQuery)) past3SeasonResults, _, err := p.querierV2.QueryRange(ctx, params.Past3SeasonQuery) if err != nil { return nil, err @@ -184,7 +190,7 @@ func (p *BaseSeasonalProvider) getMovingAvg(series *v3.Series, movingAvgWindowSi return 0 } if startIdx >= len(series.Points)-movingAvgWindowSize { - startIdx = len(series.Points) - movingAvgWindowSize + startIdx = int(math.Max(0, float64(len(series.Points)-movingAvgWindowSize))) } var sum float64 points := series.Points[startIdx:] @@ -250,7 +256,7 @@ func (p *BaseSeasonalProvider) getPredictedSeries( // moving avg of the previous period series + z score threshold * std dev of the series // moving avg of the previous period series - z score threshold * std dev of the series func (p *BaseSeasonalProvider) getBounds( - series, prevSeries, _, _, _, _ *v3.Series, + series, predictedSeries *v3.Series, zScoreThreshold float64, ) (*v3.Series, *v3.Series) { upperBoundSeries := &v3.Series{ @@ -266,8 +272,8 @@ func (p *BaseSeasonalProvider) getBounds( } for idx, curr := range series.Points { - upperBound := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series) - lowerBound := p.getMovingAvg(prevSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series) + upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series) + lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series) upperBoundSeries.Points = append(upperBoundSeries.Points, v3.Point{ Timestamp: curr.Timestamp, Value: upperBound, @@ -431,11 +437,7 @@ func (p 
*BaseSeasonalProvider) getAnomalies(ctx context.Context, req *GetAnomali upperBoundSeries, lowerBoundSeries := p.getBounds( series, - pastPeriodSeries, - currentSeasonSeries, - pastSeasonSeries, - past2SeasonSeries, - past3SeasonSeries, + predictedSeries, zScoreThreshold, ) result.UpperBoundSeries = append(result.UpperBoundSeries, upperBoundSeries) diff --git a/ee/query-service/app/api/api.go b/ee/query-service/app/api/api.go index 2e2eb8ded5..82557705fd 100644 --- a/ee/query-service/app/api/api.go +++ b/ee/query-service/app/api/api.go @@ -177,6 +177,8 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *baseapp.AuthMiddlew am.ViewAccess(ah.listLicensesV2)). Methods(http.MethodGet) + router.HandleFunc("/api/v4/query_range", am.ViewAccess(ah.queryRangeV4)).Methods(http.MethodPost) + // Gateway router.PathPrefix(gateway.RoutePrefix).HandlerFunc(am.AdminAccess(ah.ServeGatewayHTTP)) diff --git a/ee/query-service/app/api/queryrange.go b/ee/query-service/app/api/queryrange.go new file mode 100644 index 0000000000..d4f3eb975a --- /dev/null +++ b/ee/query-service/app/api/queryrange.go @@ -0,0 +1,119 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "net/http" + + "go.signoz.io/signoz/ee/query-service/anomaly" + baseapp "go.signoz.io/signoz/pkg/query-service/app" + "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.uber.org/zap" +) + +func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) { + + bodyBytes, _ := io.ReadAll(r.Body) + r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) + + queryRangeParams, apiErrorObj := baseapp.ParseQueryRangeParams(r) + + if apiErrorObj != nil { + zap.L().Error("error parsing metric query range params", zap.Error(apiErrorObj.Err)) + RespondError(w, apiErrorObj, nil) + return + } + queryRangeParams.Version = "v4" + + // add temporality for each metric + temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams) + if temporalityErr != nil { + zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr)) + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil) + return + } + + anomalyQueryExists := false + anomalyQuery := &v3.BuilderQuery{} + if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder { + for _, query := range queryRangeParams.CompositeQuery.BuilderQueries { + for _, fn := range query.Functions { + if fn.Name == v3.FunctionNameAnomaly { + anomalyQueryExists = true + anomalyQuery = query + break + } + } + } + } + + if anomalyQueryExists { + // ensure all queries have metric data source, and there should be only one anomaly query + for _, query := range queryRangeParams.CompositeQuery.BuilderQueries { + if query.DataSource != v3.DataSourceMetrics { + RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("all queries must have metric data source")}, nil) + return + } + } + + // get the threshold, and seasonality from the anomaly query + var seasonality anomaly.Seasonality + for _, fn := range anomalyQuery.Functions { + if fn.Name == v3.FunctionNameAnomaly { + seasonalityStr, ok := fn.NamedArgs["seasonality"].(string) + if !ok { + seasonalityStr = "daily" + } + if seasonalityStr == "weekly" { + seasonality = anomaly.SeasonalityWeekly + } else if seasonalityStr == "daily" { + seasonality = anomaly.SeasonalityDaily + } else { + seasonality = anomaly.SeasonalityHourly + } + break + } + } + var provider anomaly.Provider + 
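+ // choose the provider whose seasonal baseline windows (hourly/daily/weekly)
+ // match the seasonality requested on the anomaly function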
switch seasonality { + case anomaly.SeasonalityWeekly: + provider = anomaly.NewWeeklyProvider( + anomaly.WithCache[*anomaly.WeeklyProvider](aH.opts.Cache), + anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()), + anomaly.WithReader[*anomaly.WeeklyProvider](aH.opts.DataConnector), + anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](aH.opts.FeatureFlags), + ) + case anomaly.SeasonalityDaily: + provider = anomaly.NewDailyProvider( + anomaly.WithCache[*anomaly.DailyProvider](aH.opts.Cache), + anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()), + anomaly.WithReader[*anomaly.DailyProvider](aH.opts.DataConnector), + anomaly.WithFeatureLookup[*anomaly.DailyProvider](aH.opts.FeatureFlags), + ) + case anomaly.SeasonalityHourly: + provider = anomaly.NewHourlyProvider( + anomaly.WithCache[*anomaly.HourlyProvider](aH.opts.Cache), + anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()), + anomaly.WithReader[*anomaly.HourlyProvider](aH.opts.DataConnector), + anomaly.WithFeatureLookup[*anomaly.HourlyProvider](aH.opts.FeatureFlags), + ) + } + anomalies, err := provider.GetAnomalies(r.Context(), &anomaly.GetAnomaliesRequest{Params: queryRangeParams}) + if err != nil { + RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil) + return + } + uniqueResults := make(map[string]*v3.Result) + for _, anomaly := range anomalies.Results { + uniqueResults[anomaly.QueryName] = anomaly + uniqueResults[anomaly.QueryName].IsAnomaly = true + } + aH.Respond(w, uniqueResults) + } else { + r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) + aH.QueryRangeV4(w, r) + } +} diff --git a/ee/query-service/app/server.go b/ee/query-service/app/server.go index 9845ee670b..54eb7bd1e5 100644 --- a/ee/query-service/app/server.go +++ b/ee/query-service/app/server.go @@ -170,6 +170,14 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { return nil, err } } + var c cache.Cache + if serverOptions.CacheConfigPath != "" { + cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath) + if err != nil { + return nil, err + } + c = cache.NewCache(cacheOpts) + } <-readerReady rm, err := makeRulesManager(serverOptions.PromConfigPath, @@ -177,6 +185,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { serverOptions.RuleRepoURL, localDB, reader, + c, serverOptions.DisableRules, lm, serverOptions.UseLogsNewSchema, @@ -237,15 +246,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { telemetry.GetInstance().SetReader(reader) telemetry.GetInstance().SetSaasOperator(constants.SaasSegmentKey) - var c cache.Cache - if serverOptions.CacheConfigPath != "" { - cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath) - if err != nil { - return nil, err - } - c = cache.NewCache(cacheOpts) - } - fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval) if err != nil { @@ -732,6 +732,7 @@ func makeRulesManager( ruleRepoURL string, db *sqlx.DB, ch baseint.Reader, + cache cache.Cache, disableRules bool, fm baseint.FeatureLookup, useLogsNewSchema bool) (*baserules.Manager, error) { @@ -760,6 +761,7 @@ func makeRulesManager( DisableRules: disableRules, FeatureFlags: fm, Reader: ch, + Cache: cache, EvalDelay: baseconst.GetEvalDelay(), PrepareTaskFunc: rules.PrepareTaskFunc, diff --git a/ee/query-service/model/plans.go b/ee/query-service/model/plans.go index 9b696c013f..5b695143b7 100644 --- a/ee/query-service/model/plans.go +++ b/ee/query-service/model/plans.go 
@@ -127,6 +127,13 @@ var BasicPlan = basemodel.FeatureSet{ UsageLimit: -1, Route: "", }, + basemodel.Feature{ + Name: basemodel.AnomalyDetection, + Active: false, + Usage: 0, + UsageLimit: -1, + Route: "", + }, } var ProPlan = basemodel.FeatureSet{ @@ -235,6 +242,13 @@ var ProPlan = basemodel.FeatureSet{ UsageLimit: -1, Route: "", }, + basemodel.Feature{ + Name: basemodel.AnomalyDetection, + Active: true, + Usage: 0, + UsageLimit: -1, + Route: "", + }, } var EnterprisePlan = basemodel.FeatureSet{ @@ -357,4 +371,11 @@ var EnterprisePlan = basemodel.FeatureSet{ UsageLimit: -1, Route: "", }, + basemodel.Feature{ + Name: basemodel.AnomalyDetection, + Active: true, + Usage: 0, + UsageLimit: -1, + Route: "", + }, } diff --git a/ee/query-service/rules/anomaly.go b/ee/query-service/rules/anomaly.go new file mode 100644 index 0000000000..a04bfc2840 --- /dev/null +++ b/ee/query-service/rules/anomaly.go @@ -0,0 +1,393 @@ +package rules + +import ( + "context" + "encoding/json" + "fmt" + "math" + "strings" + "sync" + "time" + + "go.uber.org/zap" + + "go.signoz.io/signoz/ee/query-service/anomaly" + "go.signoz.io/signoz/pkg/query-service/cache" + "go.signoz.io/signoz/pkg/query-service/common" + "go.signoz.io/signoz/pkg/query-service/model" + + querierV2 "go.signoz.io/signoz/pkg/query-service/app/querier/v2" + "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" + "go.signoz.io/signoz/pkg/query-service/interfaces" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/query-service/utils/labels" + "go.signoz.io/signoz/pkg/query-service/utils/times" + "go.signoz.io/signoz/pkg/query-service/utils/timestamp" + + "go.signoz.io/signoz/pkg/query-service/formatter" + + baserules "go.signoz.io/signoz/pkg/query-service/rules" + + yaml "gopkg.in/yaml.v2" +) + +const ( + RuleTypeAnomaly = "anomaly_rule" +) + +type AnomalyRule struct { + *baserules.BaseRule + + mtx sync.Mutex + + reader interfaces.Reader + + // querierV2 is used for alerts created after the introduction of new metrics query builder + querierV2 interfaces.Querier + + provider anomaly.Provider + + seasonality anomaly.Seasonality +} + +func NewAnomalyRule( + id string, + p *baserules.PostableRule, + featureFlags interfaces.FeatureLookup, + reader interfaces.Reader, + cache cache.Cache, + opts ...baserules.RuleOption, +) (*AnomalyRule, error) { + + zap.L().Info("creating new AnomalyRule", zap.String("id", id), zap.Any("opts", opts)) + + baseRule, err := baserules.NewBaseRule(id, p, reader, opts...) 
+ if err != nil { + return nil, err + } + + t := AnomalyRule{ + BaseRule: baseRule, + } + + switch strings.ToLower(p.RuleCondition.Seasonality) { + case "hourly": + t.seasonality = anomaly.SeasonalityHourly + case "daily": + t.seasonality = anomaly.SeasonalityDaily + case "weekly": + t.seasonality = anomaly.SeasonalityWeekly + default: + t.seasonality = anomaly.SeasonalityDaily + } + + zap.L().Info("using seasonality", zap.String("seasonality", t.seasonality.String())) + + querierOptsV2 := querierV2.QuerierOptions{ + Reader: reader, + Cache: cache, + KeyGenerator: queryBuilder.NewKeyGenerator(), + FeatureLookup: featureFlags, + } + + t.querierV2 = querierV2.NewQuerier(querierOptsV2) + t.reader = reader + if t.seasonality == anomaly.SeasonalityHourly { + t.provider = anomaly.NewHourlyProvider( + anomaly.WithCache[*anomaly.HourlyProvider](cache), + anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()), + anomaly.WithReader[*anomaly.HourlyProvider](reader), + anomaly.WithFeatureLookup[*anomaly.HourlyProvider](featureFlags), + ) + } else if t.seasonality == anomaly.SeasonalityDaily { + t.provider = anomaly.NewDailyProvider( + anomaly.WithCache[*anomaly.DailyProvider](cache), + anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()), + anomaly.WithReader[*anomaly.DailyProvider](reader), + anomaly.WithFeatureLookup[*anomaly.DailyProvider](featureFlags), + ) + } else if t.seasonality == anomaly.SeasonalityWeekly { + t.provider = anomaly.NewWeeklyProvider( + anomaly.WithCache[*anomaly.WeeklyProvider](cache), + anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()), + anomaly.WithReader[*anomaly.WeeklyProvider](reader), + anomaly.WithFeatureLookup[*anomaly.WeeklyProvider](featureFlags), + ) + } + return &t, nil +} + +func (r *AnomalyRule) Type() baserules.RuleType { + return RuleTypeAnomaly +} + +func (r *AnomalyRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, error) { + + zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.EvalWindow().Milliseconds()), zap.Int64("evalDelay", r.EvalDelay().Milliseconds())) + + start := ts.Add(-time.Duration(r.EvalWindow())).UnixMilli() + end := ts.UnixMilli() + + if r.EvalDelay() > 0 { + start = start - int64(r.EvalDelay().Milliseconds()) + end = end - int64(r.EvalDelay().Milliseconds()) + } + // round to minute otherwise we could potentially miss data + start = start - (start % (60 * 1000)) + end = end - (end % (60 * 1000)) + + compositeQuery := r.Condition().CompositeQuery + + if compositeQuery.PanelType != v3.PanelTypeGraph { + compositeQuery.PanelType = v3.PanelTypeGraph + } + + // default mode + return &v3.QueryRangeParamsV3{ + Start: start, + End: end, + Step: int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60)), + CompositeQuery: compositeQuery, + Variables: make(map[string]interface{}, 0), + NoCache: false, + }, nil +} + +func (r *AnomalyRule) GetSelectedQuery() string { + return r.Condition().GetSelectedQueryName() +} + +func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, ts time.Time) (baserules.Vector, error) { + + params, err := r.prepareQueryRange(ts) + if err != nil { + return nil, err + } + err = r.PopulateTemporality(ctx, params) + if err != nil { + return nil, fmt.Errorf("internal error while setting temporality") + } + + anomalies, err := r.provider.GetAnomalies(ctx, &anomaly.GetAnomaliesRequest{ + Params: params, + Seasonality: r.seasonality, + }) + if err != nil { + return nil, err + } 
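+ // keep only the result for the query this rule is configured to alert on;
+ // the provider returns anomaly scores for every query in the composite query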
+ + var queryResult *v3.Result + for _, result := range anomalies.Results { + if result.QueryName == r.GetSelectedQuery() { + queryResult = result + break + } + } + + var resultVector baserules.Vector + + scoresJSON, _ := json.Marshal(queryResult.AnomalyScores) + zap.L().Info("anomaly scores", zap.String("scores", string(scoresJSON))) + + for _, series := range queryResult.AnomalyScores { + smpl, shouldAlert := r.ShouldAlert(*series) + if shouldAlert { + resultVector = append(resultVector, smpl) + } + } + return resultVector, nil +} + +func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) { + + prevState := r.State() + + valueFormatter := formatter.FromUnit(r.Unit()) + res, err := r.buildAndRunQuery(ctx, ts) + + if err != nil { + return nil, err + } + + r.mtx.Lock() + defer r.mtx.Unlock() + + resultFPs := map[uint64]struct{}{} + var alerts = make(map[uint64]*baserules.Alert, len(res)) + + for _, smpl := range res { + l := make(map[string]string, len(smpl.Metric)) + for _, lbl := range smpl.Metric { + l[lbl.Name] = lbl.Value + } + + value := valueFormatter.Format(smpl.V, r.Unit()) + threshold := valueFormatter.Format(r.TargetVal(), r.Unit()) + zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold)) + + tmplData := baserules.AlertTemplateData(l, value, threshold) + // Inject some convenience variables that are easier to remember for users + // who are not used to Go's templating system. + defs := "{{$labels := .Labels}}{{$value := .Value}}{{$threshold := .Threshold}}" + + // utility function to apply go template on labels and annotations + expand := func(text string) string { + + tmpl := baserules.NewTemplateExpander( + ctx, + defs+text, + "__alert_"+r.Name(), + tmplData, + times.Time(timestamp.FromTime(ts)), + nil, + ) + result, err := tmpl.Expand() + if err != nil { + result = fmt.Sprintf("", err) + zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData)) + } + return result + } + + lb := labels.NewBuilder(smpl.Metric).Del(labels.MetricNameLabel).Del(labels.TemporalityLabel) + resultLabels := labels.NewBuilder(smpl.MetricOrig).Del(labels.MetricNameLabel).Del(labels.TemporalityLabel).Labels() + + for name, value := range r.Labels().Map() { + lb.Set(name, expand(value)) + } + + lb.Set(labels.AlertNameLabel, r.Name()) + lb.Set(labels.AlertRuleIdLabel, r.ID()) + lb.Set(labels.RuleSourceLabel, r.GeneratorURL()) + + annotations := make(labels.Labels, 0, len(r.Annotations().Map())) + for name, value := range r.Annotations().Map() { + annotations = append(annotations, labels.Label{Name: common.NormalizeLabelName(name), Value: expand(value)}) + } + if smpl.IsMissing { + lb.Set(labels.AlertNameLabel, "[No data] "+r.Name()) + } + + lbs := lb.Labels() + h := lbs.Hash() + resultFPs[h] = struct{}{} + + if _, ok := alerts[h]; ok { + zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h])) + err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels") + return nil, err + } + + alerts[h] = &baserules.Alert{ + Labels: lbs, + QueryResultLables: resultLabels, + Annotations: annotations, + ActiveAt: ts, + State: model.StatePending, + Value: smpl.V, + GeneratorURL: r.GeneratorURL(), + Receivers: r.PreferredChannels(), + Missing: smpl.IsMissing, + } + } + + zap.L().Info("number of alerts found", 
zap.String("name", r.Name()), zap.Int("count", len(alerts))) + + // alerts[h] is ready, add or update active list now + for h, a := range alerts { + // Check whether we already have alerting state for the identifying label set. + // Update the last value and annotations if so, create a new alert entry otherwise. + if alert, ok := r.Active[h]; ok && alert.State != model.StateInactive { + + alert.Value = a.Value + alert.Annotations = a.Annotations + alert.Receivers = r.PreferredChannels() + continue + } + + r.Active[h] = a + } + + itemsToAdd := []model.RuleStateHistory{} + + // Check if any pending alerts should be removed or fire now. Write out alert timeseries. + for fp, a := range r.Active { + labelsJSON, err := json.Marshal(a.QueryResultLables) + if err != nil { + zap.L().Error("error marshaling labels", zap.Error(err), zap.Any("labels", a.Labels)) + } + if _, ok := resultFPs[fp]; !ok { + // If the alert was previously firing, keep it around for a given + // retention time so it is reported as resolved to the AlertManager. + if a.State == model.StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > baserules.ResolvedRetention) { + delete(r.Active, fp) + } + if a.State != model.StateInactive { + a.State = model.StateInactive + a.ResolvedAt = ts + itemsToAdd = append(itemsToAdd, model.RuleStateHistory{ + RuleID: r.ID(), + RuleName: r.Name(), + State: model.StateInactive, + StateChanged: true, + UnixMilli: ts.UnixMilli(), + Labels: model.LabelsString(labelsJSON), + Fingerprint: a.QueryResultLables.Hash(), + Value: a.Value, + }) + } + continue + } + + if a.State == model.StatePending && ts.Sub(a.ActiveAt) >= r.HoldDuration() { + a.State = model.StateFiring + a.FiredAt = ts + state := model.StateFiring + if a.Missing { + state = model.StateNoData + } + itemsToAdd = append(itemsToAdd, model.RuleStateHistory{ + RuleID: r.ID(), + RuleName: r.Name(), + State: state, + StateChanged: true, + UnixMilli: ts.UnixMilli(), + Labels: model.LabelsString(labelsJSON), + Fingerprint: a.QueryResultLables.Hash(), + Value: a.Value, + }) + } + } + + currentState := r.State() + + overallStateChanged := currentState != prevState + for idx, item := range itemsToAdd { + item.OverallStateChanged = overallStateChanged + item.OverallState = currentState + itemsToAdd[idx] = item + } + + r.RecordRuleStateHistory(ctx, prevState, currentState, itemsToAdd) + + return len(r.Active), nil +} + +func (r *AnomalyRule) String() string { + + ar := baserules.PostableRule{ + AlertName: r.Name(), + RuleCondition: r.Condition(), + EvalWindow: baserules.Duration(r.EvalWindow()), + Labels: r.Labels().Map(), + Annotations: r.Annotations().Map(), + PreferredChannels: r.PreferredChannels(), + } + + byt, err := yaml.Marshal(ar) + if err != nil { + return fmt.Sprintf("error marshaling alerting rule: %s", err.Error()) + } + + return string(byt) +} diff --git a/ee/query-service/rules/manager.go b/ee/query-service/rules/manager.go index 2b80441f0c..5ed35d4d34 100644 --- a/ee/query-service/rules/manager.go +++ b/ee/query-service/rules/manager.go @@ -53,6 +53,25 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) // create promql rule task for evalution task = newTask(baserules.TaskTypeProm, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB) + } else if opts.Rule.RuleType == baserules.RuleTypeAnomaly { + // create anomaly rule + ar, err := NewAnomalyRule( + ruleId, + opts.Rule, + opts.FF, + opts.Reader, + opts.Cache, + 
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
+		)
+		if err != nil {
+			return task, err
+		}
+
+		rules = append(rules, ar)
+
+		// create anomaly rule task for evaluation
+		task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(opts.Rule.Frequency), rules, opts.ManagerOpts, opts.NotifyFunc, opts.RuleDB)
+
 	} else {
 		return nil, fmt.Errorf("unsupported rule type. Supported types: %s, %s", baserules.RuleTypeProm, baserules.RuleTypeThreshold)
 	}
diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go
index 219181dc7f..5055913113 100644
--- a/pkg/query-service/app/http_handler.go
+++ b/pkg/query-service/app/http_handler.go
@@ -518,7 +518,7 @@ func (aH *APIHandler) getRule(w http.ResponseWriter, r *http.Request) {
 }
 
 // populateTemporality adds the temporality to the query if it is not present
-func (aH *APIHandler) populateTemporality(ctx context.Context, qp *v3.QueryRangeParamsV3) error {
+func (aH *APIHandler) PopulateTemporality(ctx context.Context, qp *v3.QueryRangeParamsV3) error {
 	aH.temporalityMux.Lock()
 	defer aH.temporalityMux.Unlock()
 
@@ -3791,7 +3791,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// add temporality for each metric
-	temporalityErr := aH.populateTemporality(r.Context(), queryRangeParams)
+	temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams)
 	if temporalityErr != nil {
 		zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
 		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
@@ -4139,7 +4139,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {
 	queryRangeParams.Version = "v4"
 
 	// add temporality for each metric
-	temporalityErr := aH.populateTemporality(r.Context(), queryRangeParams)
+	temporalityErr := aH.PopulateTemporality(r.Context(), queryRangeParams)
 	if temporalityErr != nil {
 		zap.L().Error("Error while adding temporality for metrics", zap.Error(temporalityErr))
 		RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
diff --git a/pkg/query-service/app/server.go b/pkg/query-service/app/server.go
index 22d52b9884..b71df63781 100644
--- a/pkg/query-service/app/server.go
+++ b/pkg/query-service/app/server.go
@@ -144,9 +144,20 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 			return nil, err
 		}
 	}
+	var c cache.Cache
+	if serverOptions.CacheConfigPath != "" {
+		cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
+		if err != nil {
+			return nil, err
+		}
+		c = cache.NewCache(cacheOpts)
+	}
 
 	<-readerReady
-	rm, err := makeRulesManager(serverOptions.PromConfigPath, constants.GetAlertManagerApiPrefix(), serverOptions.RuleRepoURL, localDB, reader, serverOptions.DisableRules, fm, serverOptions.UseLogsNewSchema)
+	rm, err := makeRulesManager(
+		serverOptions.PromConfigPath,
+		constants.GetAlertManagerApiPrefix(),
+		serverOptions.RuleRepoURL, localDB, reader, c, serverOptions.DisableRules, fm, serverOptions.UseLogsNewSchema)
 	if err != nil {
 		return nil, err
 	}
@@ -158,15 +169,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		}
 	}()
 
-	var c cache.Cache
-	if serverOptions.CacheConfigPath != "" {
-		cacheOpts, err := cache.LoadFromYAMLCacheConfigFile(serverOptions.CacheConfigPath)
-		if err != nil {
-			return nil, err
-		}
-		c = cache.NewCache(cacheOpts)
-	}
-
 	fluxInterval, err := time.ParseDuration(serverOptions.FluxInterval)
 	if err != nil {
 		return nil, err
 	}
@@ -715,6 +717,7 @@ func makeRulesManager(
ruleRepoURL string, db *sqlx.DB, ch interfaces.Reader, + cache cache.Cache, disableRules bool, fm interfaces.FeatureLookup, useLogsNewSchema bool) (*rules.Manager, error) { @@ -743,6 +746,7 @@ func makeRulesManager( DisableRules: disableRules, FeatureFlags: fm, Reader: ch, + Cache: cache, EvalDelay: constants.GetEvalDelay(), UseLogsNewSchema: useLogsNewSchema, } diff --git a/pkg/query-service/common/query_range.go b/pkg/query-service/common/query_range.go index d6b62baf27..598ac1a21c 100644 --- a/pkg/query-service/common/query_range.go +++ b/pkg/query-service/common/query_range.go @@ -2,7 +2,9 @@ package common import ( "math" + "regexp" "time" + "unicode" "go.signoz.io/signoz/pkg/query-service/constants" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" @@ -73,6 +75,23 @@ func LCMList(nums []int64) int64 { return result } +func NormalizeLabelName(name string) string { + // See https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels + + // Regular expression to match non-alphanumeric characters except underscores + reg := regexp.MustCompile(`[^a-zA-Z0-9_]`) + + // Replace all non-alphanumeric characters except underscores with underscores + normalized := reg.ReplaceAllString(name, "_") + + // If the first character is not a letter or an underscore, prepend an underscore + if len(normalized) > 0 && !unicode.IsLetter(rune(normalized[0])) && normalized[0] != '_' { + normalized = "_" + normalized + } + + return normalized +} + func GetSeriesFromCachedData(data []querycache.CachedSeriesData, start, end int64) []*v3.Series { series := make(map[uint64]*v3.Series) diff --git a/pkg/query-service/model/featureSet.go b/pkg/query-service/model/featureSet.go index 0e7a1c0278..de4a4ea879 100644 --- a/pkg/query-service/model/featureSet.go +++ b/pkg/query-service/model/featureSet.go @@ -22,6 +22,7 @@ const AlertChannelPagerduty = "ALERT_CHANNEL_PAGERDUTY" const AlertChannelMsTeams = "ALERT_CHANNEL_MSTEAMS" const AlertChannelOpsgenie = "ALERT_CHANNEL_OPSGENIE" const AlertChannelEmail = "ALERT_CHANNEL_EMAIL" +const AnomalyDetection = "ANOMALY_DETECTION" var BasicPlan = FeatureSet{ Feature{ @@ -115,4 +116,11 @@ var BasicPlan = FeatureSet{ UsageLimit: -1, Route: "", }, + Feature{ + Name: AnomalyDetection, + Active: false, + Usage: 0, + UsageLimit: -1, + Route: "", + }, } diff --git a/pkg/query-service/rules/alerting.go b/pkg/query-service/rules/alerting.go index 77c9fbe219..ec2b0c8016 100644 --- a/pkg/query-service/rules/alerting.go +++ b/pkg/query-service/rules/alerting.go @@ -19,7 +19,7 @@ import ( const ( // how long before re-sending the alert - resolvedRetention = 15 * time.Minute + ResolvedRetention = 15 * time.Minute TestAlertPostFix = "_TEST_ALERT" ) @@ -29,6 +29,7 @@ type RuleType string const ( RuleTypeThreshold = "threshold_rule" RuleTypeProm = "promql_rule" + RuleTypeAnomaly = "anomaly_rule" ) type RuleHealth string @@ -83,27 +84,16 @@ type NamedAlert struct { type CompareOp string const ( - CompareOpNone CompareOp = "0" - ValueIsAbove CompareOp = "1" - ValueIsBelow CompareOp = "2" - ValueIsEq CompareOp = "3" - ValueIsNotEq CompareOp = "4" + CompareOpNone CompareOp = "0" + ValueIsAbove CompareOp = "1" + ValueIsBelow CompareOp = "2" + ValueIsEq CompareOp = "3" + ValueIsNotEq CompareOp = "4" + ValueAboveOrEq CompareOp = "5" + ValueBelowOrEq CompareOp = "6" + ValueOutsideBounds CompareOp = "7" ) -func ResolveCompareOp(cop CompareOp) string { - switch cop { - case ValueIsAbove: - return ">" - case ValueIsBelow: - return "<" - case ValueIsEq: - return "==" - case ValueIsNotEq: - 
return "!=" - } - return "" -} - type MatchType string const ( @@ -123,6 +113,8 @@ type RuleCondition struct { AbsentFor uint64 `yaml:"absentFor,omitempty" json:"absentFor,omitempty"` MatchType MatchType `json:"matchType,omitempty"` TargetUnit string `json:"targetUnit,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + Seasonality string `json:"seasonality,omitempty"` SelectedQuery string `json:"selectedQueryName,omitempty"` } diff --git a/pkg/query-service/rules/api_params.go b/pkg/query-service/rules/api_params.go index 6d3288ece1..77a1552946 100644 --- a/pkg/query-service/rules/api_params.go +++ b/pkg/query-service/rules/api_params.go @@ -133,7 +133,9 @@ func parseIntoRule(initRule PostableRule, content []byte, kind RuleDataKind) (*P if rule.RuleCondition != nil { if rule.RuleCondition.CompositeQuery.QueryType == v3.QueryTypeBuilder { - rule.RuleType = RuleTypeThreshold + if rule.RuleType == "" { + rule.RuleType = RuleTypeThreshold + } } else if rule.RuleCondition.CompositeQuery.QueryType == v3.QueryTypePromQL { rule.RuleType = RuleTypeProm } diff --git a/pkg/query-service/rules/base_rule.go b/pkg/query-service/rules/base_rule.go index b82aab91b5..181eaa3a28 100644 --- a/pkg/query-service/rules/base_rule.go +++ b/pkg/query-service/rules/base_rule.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "go.signoz.io/signoz/pkg/query-service/common" "go.signoz.io/signoz/pkg/query-service/converter" "go.signoz.io/signoz/pkg/query-service/interfaces" "go.signoz.io/signoz/pkg/query-service/model" @@ -53,7 +54,7 @@ type BaseRule struct { health RuleHealth lastError error - active map[uint64]*Alert + Active map[uint64]*Alert // lastTimestampWithDatapoints is the timestamp of the last datapoint we observed // for this rule @@ -72,6 +73,12 @@ type BaseRule struct { // sendAlways will send alert irresepective of resendDelay // or other params sendAlways bool + + // TemporalityMap is a map of metric name to temporality + // to avoid fetching temporality for the same metric multiple times + // querying the v4 table on low cardinal temporality column + // should be fast but we can still avoid the query if we have the data in memory + TemporalityMap map[string]map[v3.Temporality]bool } type RuleOption func(*BaseRule) @@ -116,8 +123,9 @@ func NewBaseRule(id string, p *PostableRule, reader interfaces.Reader, opts ...R annotations: qslabels.FromMap(p.Annotations), preferredChannels: p.PreferredChannels, health: HealthUnknown, - active: map[uint64]*Alert{}, + Active: map[uint64]*Alert{}, reader: reader, + TemporalityMap: make(map[string]map[v3.Temporality]bool), } if baseRule.evalWindow == 0 { @@ -165,14 +173,30 @@ func (r *BaseRule) currentAlerts() []*Alert { r.mtx.Lock() defer r.mtx.Unlock() - alerts := make([]*Alert, 0, len(r.active)) - for _, a := range r.active { + alerts := make([]*Alert, 0, len(r.Active)) + for _, a := range r.Active { anew := *a alerts = append(alerts, &anew) } return alerts } +func (r *BaseRule) EvalDelay() time.Duration { + return r.evalDelay +} + +func (r *BaseRule) EvalWindow() time.Duration { + return r.evalWindow +} + +func (r *BaseRule) HoldDuration() time.Duration { + return r.holdDuration +} + +func (r *BaseRule) TargetVal() float64 { + return r.targetVal() +} + func (r *ThresholdRule) hostFromSource() string { parsedUrl, err := url.Parse(r.source) if err != nil { @@ -267,7 +291,7 @@ func (r *BaseRule) GetEvaluationTimestamp() time.Time { func (r *BaseRule) State() model.AlertState { maxState := model.StateInactive - for _, a := range r.active { + for _, a := range 
r.Active { if a.State > maxState { maxState = a.State } @@ -306,12 +330,12 @@ func (r *BaseRule) ForEachActiveAlert(f func(*Alert)) { r.mtx.Lock() defer r.mtx.Unlock() - for _, a := range r.active { + for _, a := range r.Active { f(a) } } -func (r *BaseRule) shouldAlert(series v3.Series) (Sample, bool) { +func (r *BaseRule) ShouldAlert(series v3.Series) (Sample, bool) { var alertSmpl Sample var shouldAlert bool var lbls qslabels.Labels @@ -319,7 +343,7 @@ func (r *BaseRule) shouldAlert(series v3.Series) (Sample, bool) { for name, value := range series.Labels { lbls = append(lbls, qslabels.Label{Name: name, Value: value}) - lblsNormalized = append(lblsNormalized, qslabels.Label{Name: normalizeLabelName(name), Value: value}) + lblsNormalized = append(lblsNormalized, qslabels.Label{Name: common.NormalizeLabelName(name), Value: value}) } series.Points = removeGroupinSetPoints(series) @@ -364,6 +388,14 @@ func (r *BaseRule) shouldAlert(series v3.Series) (Sample, bool) { break } } + } else if r.compareOp() == ValueOutsideBounds { + for _, smpl := range series.Points { + if math.Abs(smpl.Value) >= r.targetVal() { + alertSmpl = Sample{Point: Point{V: smpl.Value}, Metric: lblsNormalized, MetricOrig: lbls} + shouldAlert = true + break + } + } } case AllTheTimes: // If all samples match the condition, the rule is firing. @@ -425,6 +457,14 @@ func (r *BaseRule) shouldAlert(series v3.Series) (Sample, bool) { } } } + } else if r.compareOp() == ValueOutsideBounds { + for _, smpl := range series.Points { + if math.Abs(smpl.Value) >= r.targetVal() { + alertSmpl = Sample{Point: Point{V: smpl.Value}, Metric: lblsNormalized, MetricOrig: lbls} + shouldAlert = true + break + } + } } case OnAverage: // If the average of all samples matches the condition, the rule is firing. @@ -454,6 +494,10 @@ func (r *BaseRule) shouldAlert(series v3.Series) (Sample, bool) { if avg != r.targetVal() { shouldAlert = true } + } else if r.compareOp() == ValueOutsideBounds { + if math.Abs(avg) >= r.targetVal() { + shouldAlert = true + } } case InTotal: // If the sum of all samples matches the condition, the rule is firing. @@ -482,6 +526,10 @@ func (r *BaseRule) shouldAlert(series v3.Series) (Sample, bool) { if sum != r.targetVal() { shouldAlert = true } + } else if r.compareOp() == ValueOutsideBounds { + if math.Abs(sum) >= r.targetVal() { + shouldAlert = true + } } case Last: // If the last sample matches the condition, the rule is firing. 
@@ -602,3 +650,59 @@ func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, curren return nil } + +func (r *BaseRule) PopulateTemporality(ctx context.Context, qp *v3.QueryRangeParamsV3) error { + + missingTemporality := make([]string, 0) + metricNameToTemporality := make(map[string]map[v3.Temporality]bool) + if qp.CompositeQuery != nil && len(qp.CompositeQuery.BuilderQueries) > 0 { + for _, query := range qp.CompositeQuery.BuilderQueries { + // if there is no temporality specified in the query but we have it in the map + // then use the value from the map + if query.Temporality == "" && r.TemporalityMap[query.AggregateAttribute.Key] != nil { + // We prefer delta if it is available + if r.TemporalityMap[query.AggregateAttribute.Key][v3.Delta] { + query.Temporality = v3.Delta + } else if r.TemporalityMap[query.AggregateAttribute.Key][v3.Cumulative] { + query.Temporality = v3.Cumulative + } else { + query.Temporality = v3.Unspecified + } + } + // we don't have temporality for this metric + if query.DataSource == v3.DataSourceMetrics && query.Temporality == "" { + missingTemporality = append(missingTemporality, query.AggregateAttribute.Key) + } + if _, ok := metricNameToTemporality[query.AggregateAttribute.Key]; !ok { + metricNameToTemporality[query.AggregateAttribute.Key] = make(map[v3.Temporality]bool) + } + } + } + + var nameToTemporality map[string]map[v3.Temporality]bool + var err error + + if len(missingTemporality) > 0 { + nameToTemporality, err = r.reader.FetchTemporality(ctx, missingTemporality) + if err != nil { + return err + } + } + + if qp.CompositeQuery != nil && len(qp.CompositeQuery.BuilderQueries) > 0 { + for name := range qp.CompositeQuery.BuilderQueries { + query := qp.CompositeQuery.BuilderQueries[name] + if query.DataSource == v3.DataSourceMetrics && query.Temporality == "" { + if nameToTemporality[query.AggregateAttribute.Key][v3.Delta] { + query.Temporality = v3.Delta + } else if nameToTemporality[query.AggregateAttribute.Key][v3.Cumulative] { + query.Temporality = v3.Cumulative + } else { + query.Temporality = v3.Unspecified + } + r.TemporalityMap[query.AggregateAttribute.Key] = nameToTemporality[query.AggregateAttribute.Key] + } + } + } + return nil +} diff --git a/pkg/query-service/rules/manager.go b/pkg/query-service/rules/manager.go index 89dec5f3d1..09eb7ad367 100644 --- a/pkg/query-service/rules/manager.go +++ b/pkg/query-service/rules/manager.go @@ -18,6 +18,7 @@ import ( "github.com/jmoiron/sqlx" + "go.signoz.io/signoz/pkg/query-service/cache" am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager" "go.signoz.io/signoz/pkg/query-service/interfaces" "go.signoz.io/signoz/pkg/query-service/model" @@ -32,6 +33,7 @@ type PrepareTaskOptions struct { RuleDB RuleDB Logger *zap.Logger Reader interfaces.Reader + Cache cache.Cache FF interfaces.FeatureLookup ManagerOpts *ManagerOptions NotifyFunc NotifyFunc @@ -73,6 +75,7 @@ type ManagerOptions struct { DisableRules bool FeatureFlags interfaces.FeatureLookup Reader interfaces.Reader + Cache cache.Cache EvalDelay time.Duration @@ -96,9 +99,9 @@ type Manager struct { logger *zap.Logger - featureFlags interfaces.FeatureLookup - reader interfaces.Reader - + featureFlags interfaces.FeatureLookup + reader interfaces.Reader + cache cache.Cache prepareTaskFunc func(opts PrepareTaskOptions) (Task, error) UseLogsNewSchema bool @@ -209,6 +212,7 @@ func NewManager(o *ManagerOptions) (*Manager, error) { logger: o.Logger, featureFlags: o.FeatureFlags, reader: o.Reader, + cache: o.Cache, prepareTaskFunc: 
o.PrepareTaskFunc, } return m, nil @@ -342,6 +346,7 @@ func (m *Manager) editTask(rule *PostableRule, taskName string) error { RuleDB: m.ruleDB, Logger: m.logger, Reader: m.reader, + Cache: m.cache, FF: m.featureFlags, ManagerOpts: m.opts, NotifyFunc: m.prepareNotifyFunc(), @@ -463,6 +468,7 @@ func (m *Manager) addTask(rule *PostableRule, taskName string) error { RuleDB: m.ruleDB, Logger: m.logger, Reader: m.reader, + Cache: m.cache, FF: m.featureFlags, ManagerOpts: m.opts, NotifyFunc: m.prepareNotifyFunc(), diff --git a/pkg/query-service/rules/prom_rule.go b/pkg/query-service/rules/prom_rule.go index db5a963731..473fac0d5d 100644 --- a/pkg/query-service/rules/prom_rule.go +++ b/pkg/query-service/rules/prom_rule.go @@ -131,7 +131,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) continue } - alertSmpl, shouldAlert := r.shouldAlert(toCommonSeries(series)) + alertSmpl, shouldAlert := r.ShouldAlert(toCommonSeries(series)) if !shouldAlert { continue } @@ -208,21 +208,21 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) for h, a := range alerts { // Check whether we already have alerting state for the identifying label set. // Update the last value and annotations if so, create a new alert entry otherwise. - if alert, ok := r.active[h]; ok && alert.State != model.StateInactive { + if alert, ok := r.Active[h]; ok && alert.State != model.StateInactive { alert.Value = a.Value alert.Annotations = a.Annotations alert.Receivers = r.preferredChannels continue } - r.active[h] = a + r.Active[h] = a } itemsToAdd := []model.RuleStateHistory{} // Check if any pending alerts should be removed or fire now. Write out alert timeseries. - for fp, a := range r.active { + for fp, a := range r.Active { labelsJSON, err := json.Marshal(a.QueryResultLables) if err != nil { zap.L().Error("error marshaling labels", zap.Error(err), zap.String("name", r.Name())) @@ -230,8 +230,8 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) if _, ok := resultFPs[fp]; !ok { // If the alert was previously firing, keep it around for a given // retention time so it is reported as resolved to the AlertManager. 
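+			// (resolvedRetention becomes the exported ResolvedRetention so the
+			// ee anomaly rule can apply the same retention window.)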
- if a.State == model.StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) { - delete(r.active, fp) + if a.State == model.StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > ResolvedRetention) { + delete(r.Active, fp) } if a.State != model.StateInactive { a.State = model.StateInactive @@ -283,7 +283,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) r.RecordRuleStateHistory(ctx, prevState, currentState, itemsToAdd) - return len(r.active), nil + return len(r.Active), nil } func (r *PromRule) String() string { diff --git a/pkg/query-service/rules/prom_rule_task.go b/pkg/query-service/rules/prom_rule_task.go index f78994430a..c96b09e74f 100644 --- a/pkg/query-service/rules/prom_rule_task.go +++ b/pkg/query-service/rules/prom_rule_task.go @@ -293,8 +293,8 @@ func (g *PromRuleTask) CopyState(fromTask Task) error { continue } - for fp, a := range far.active { - ar.active[fp] = a + for fp, a := range far.Active { + ar.Active[fp] = a } ar.handledRestart = far.handledRestart } diff --git a/pkg/query-service/rules/promrule_test.go b/pkg/query-service/rules/promrule_test.go index c87ef2cee9..3bc268ed65 100644 --- a/pkg/query-service/rules/promrule_test.go +++ b/pkg/query-service/rules/promrule_test.go @@ -661,7 +661,7 @@ func TestPromRuleShouldAlert(t *testing.T) { assert.NoError(t, err) } - _, shoulAlert := rule.shouldAlert(toCommonSeries(c.values)) + _, shoulAlert := rule.ShouldAlert(toCommonSeries(c.values)) assert.Equal(t, c.expectAlert, shoulAlert, "Test case %d", idx) } } diff --git a/pkg/query-service/rules/rule.go b/pkg/query-service/rules/rule.go index 2b5b8d5aae..a8d25f89b6 100644 --- a/pkg/query-service/rules/rule.go +++ b/pkg/query-service/rules/rule.go @@ -18,6 +18,9 @@ type Rule interface { Labels() labels.BaseLabels Annotations() labels.BaseLabels Condition() *RuleCondition + EvalDelay() time.Duration + EvalWindow() time.Duration + HoldDuration() time.Duration State() model.AlertState ActiveAlerts() []*Alert diff --git a/pkg/query-service/rules/rule_task.go b/pkg/query-service/rules/rule_task.go index 0a969bffc8..fc7bde05af 100644 --- a/pkg/query-service/rules/rule_task.go +++ b/pkg/query-service/rules/rule_task.go @@ -285,8 +285,8 @@ func (g *RuleTask) CopyState(fromTask Task) error { continue } - for fp, a := range far.active { - ar.active[fp] = a + for fp, a := range far.Active { + ar.Active[fp] = a } ar.handledRestart = far.handledRestart } diff --git a/pkg/query-service/rules/threshold_rule.go b/pkg/query-service/rules/threshold_rule.go index 0f768314cf..8453f1a268 100644 --- a/pkg/query-service/rules/threshold_rule.go +++ b/pkg/query-service/rules/threshold_rule.go @@ -6,10 +6,8 @@ import ( "encoding/json" "fmt" "math" - "regexp" "text/template" "time" - "unicode" "go.uber.org/zap" @@ -43,11 +41,6 @@ type ThresholdRule struct { // if the version is "v3", then we use the old querier // if the version is "v4", then we use the new querierV2 version string - // temporalityMap is a map of metric name to temporality - // to avoid fetching temporality for the same metric multiple times - // querying the v4 table on low cardinal temporality column - // should be fast but we can still avoid the query if we have the data in memory - temporalityMap map[string]map[v3.Temporality]bool // querier is used for alerts created before the introduction of new metrics query builder querier interfaces.Querier @@ -76,9 +69,8 @@ func NewThresholdRule( } t := ThresholdRule{ - BaseRule: baseRule, - version: p.Version, - 
temporalityMap: make(map[string]map[v3.Temporality]bool), + BaseRule: baseRule, + version: p.Version, } querierOption := querier.QuerierOptions{ @@ -107,63 +99,6 @@ func (r *ThresholdRule) Type() RuleType { return RuleTypeThreshold } -// populateTemporality same as addTemporality but for v4 and better -func (r *ThresholdRule) populateTemporality(ctx context.Context, qp *v3.QueryRangeParamsV3) error { - - missingTemporality := make([]string, 0) - metricNameToTemporality := make(map[string]map[v3.Temporality]bool) - if qp.CompositeQuery != nil && len(qp.CompositeQuery.BuilderQueries) > 0 { - for _, query := range qp.CompositeQuery.BuilderQueries { - // if there is no temporality specified in the query but we have it in the map - // then use the value from the map - if query.Temporality == "" && r.temporalityMap[query.AggregateAttribute.Key] != nil { - // We prefer delta if it is available - if r.temporalityMap[query.AggregateAttribute.Key][v3.Delta] { - query.Temporality = v3.Delta - } else if r.temporalityMap[query.AggregateAttribute.Key][v3.Cumulative] { - query.Temporality = v3.Cumulative - } else { - query.Temporality = v3.Unspecified - } - } - // we don't have temporality for this metric - if query.DataSource == v3.DataSourceMetrics && query.Temporality == "" { - missingTemporality = append(missingTemporality, query.AggregateAttribute.Key) - } - if _, ok := metricNameToTemporality[query.AggregateAttribute.Key]; !ok { - metricNameToTemporality[query.AggregateAttribute.Key] = make(map[v3.Temporality]bool) - } - } - } - - var nameToTemporality map[string]map[v3.Temporality]bool - var err error - - if len(missingTemporality) > 0 { - nameToTemporality, err = r.reader.FetchTemporality(ctx, missingTemporality) - if err != nil { - return err - } - } - - if qp.CompositeQuery != nil && len(qp.CompositeQuery.BuilderQueries) > 0 { - for name := range qp.CompositeQuery.BuilderQueries { - query := qp.CompositeQuery.BuilderQueries[name] - if query.DataSource == v3.DataSourceMetrics && query.Temporality == "" { - if nameToTemporality[query.AggregateAttribute.Key][v3.Delta] { - query.Temporality = v3.Delta - } else if nameToTemporality[query.AggregateAttribute.Key][v3.Cumulative] { - query.Temporality = v3.Cumulative - } else { - query.Temporality = v3.Unspecified - } - r.temporalityMap[query.AggregateAttribute.Key] = nameToTemporality[query.AggregateAttribute.Key] - } - } - } - return nil -} - func (r *ThresholdRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, error) { zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.evalWindow.Milliseconds()), zap.Int64("evalDelay", r.evalDelay.Milliseconds())) @@ -313,7 +248,7 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (Vec if err != nil { return nil, err } - err = r.populateTemporality(ctx, params) + err = r.PopulateTemporality(ctx, params) if err != nil { return nil, fmt.Errorf("internal error while setting temporality") } @@ -406,7 +341,7 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (Vec } for _, series := range queryResult.Series { - smpl, shouldAlert := r.shouldAlert(*series) + smpl, shouldAlert := r.ShouldAlert(*series) if shouldAlert { resultVector = append(resultVector, smpl) } @@ -414,23 +349,6 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (Vec return resultVector, nil } -func normalizeLabelName(name string) string { - // See https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels - - 
// Regular expression to match non-alphanumeric characters except underscores - reg := regexp.MustCompile(`[^a-zA-Z0-9_]`) - - // Replace all non-alphanumeric characters except underscores with underscores - normalized := reg.ReplaceAllString(name, "_") - - // If the first character is not a letter or an underscore, prepend an underscore - if len(normalized) > 0 && !unicode.IsLetter(rune(normalized[0])) && normalized[0] != '_' { - normalized = "_" + normalized - } - - return normalized -} - func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) { prevState := r.State() @@ -495,7 +413,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er annotations := make(labels.Labels, 0, len(r.annotations.Map())) for name, value := range r.annotations.Map() { - annotations = append(annotations, labels.Label{Name: normalizeLabelName(name), Value: expand(value)}) + annotations = append(annotations, labels.Label{Name: common.NormalizeLabelName(name), Value: expand(value)}) } if smpl.IsMissing { lb.Set(labels.AlertNameLabel, "[No data] "+r.Name()) @@ -547,7 +465,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er for h, a := range alerts { // Check whether we already have alerting state for the identifying label set. // Update the last value and annotations if so, create a new alert entry otherwise. - if alert, ok := r.active[h]; ok && alert.State != model.StateInactive { + if alert, ok := r.Active[h]; ok && alert.State != model.StateInactive { alert.Value = a.Value alert.Annotations = a.Annotations @@ -555,13 +473,13 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er continue } - r.active[h] = a + r.Active[h] = a } itemsToAdd := []model.RuleStateHistory{} // Check if any pending alerts should be removed or fire now. Write out alert timeseries. - for fp, a := range r.active { + for fp, a := range r.Active { labelsJSON, err := json.Marshal(a.QueryResultLables) if err != nil { zap.L().Error("error marshaling labels", zap.Error(err), zap.Any("labels", a.Labels)) @@ -569,8 +487,8 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er if _, ok := resultFPs[fp]; !ok { // If the alert was previously firing, keep it around for a given // retention time so it is reported as resolved to the AlertManager. 
- if a.State == model.StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) { - delete(r.active, fp) + if a.State == model.StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > ResolvedRetention) { + delete(r.Active, fp) } if a.State != model.StateInactive { a.State = model.StateInactive @@ -623,7 +541,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, er r.health = HealthGood r.lastError = err - return len(r.active), nil + return len(r.Active), nil } func (r *ThresholdRule) String() string { diff --git a/pkg/query-service/rules/threshold_rule_test.go b/pkg/query-service/rules/threshold_rule_test.go index 8f9554db52..e23ba0d05c 100644 --- a/pkg/query-service/rules/threshold_rule_test.go +++ b/pkg/query-service/rules/threshold_rule_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader" + "go.signoz.io/signoz/pkg/query-service/common" "go.signoz.io/signoz/pkg/query-service/featureManager" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.signoz.io/signoz/pkg/query-service/utils/labels" @@ -800,7 +801,7 @@ func TestThresholdRuleShouldAlert(t *testing.T) { values.Points[i].Timestamp = time.Now().UnixMilli() } - smpl, shoulAlert := rule.shouldAlert(c.values) + smpl, shoulAlert := rule.ShouldAlert(c.values) assert.Equal(t, c.expectAlert, shoulAlert, "Test case %d", idx) if shoulAlert { assert.Equal(t, c.expectedAlertSample.Value, smpl.V, "Test case %d", idx) @@ -844,7 +845,7 @@ func TestNormalizeLabelName(t *testing.T) { } for _, c := range cases { - assert.Equal(t, c.expected, normalizeLabelName(c.labelName)) + assert.Equal(t, c.expected, common.NormalizeLabelName(c.labelName)) } } @@ -1007,9 +1008,9 @@ func TestThresholdRuleLabelNormalization(t *testing.T) { values.Points[i].Timestamp = time.Now().UnixMilli() } - sample, shoulAlert := rule.shouldAlert(c.values) + sample, shoulAlert := rule.ShouldAlert(c.values) for name, value := range c.values.Labels { - assert.Equal(t, value, sample.Metric.Get(normalizeLabelName(name))) + assert.Equal(t, value, sample.Metric.Get(common.NormalizeLabelName(name))) } assert.Equal(t, c.expectAlert, shoulAlert, "Test case %d", idx) @@ -1243,7 +1244,7 @@ func TestThresholdRuleUnitCombinations(t *testing.T) { reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true) rule, err := NewThresholdRule("69", &postableRule, fm, reader, true) - rule.temporalityMap = map[string]map[v3.Temporality]bool{ + rule.TemporalityMap = map[string]map[v3.Temporality]bool{ "signoz_calls_total": { v3.Delta: true, }, @@ -1260,7 +1261,7 @@ func TestThresholdRuleUnitCombinations(t *testing.T) { assert.Equal(t, c.expectAlerts, retVal.(int), "case %d", idx) if c.expectAlerts != 0 { foundCount := 0 - for _, item := range rule.active { + for _, item := range rule.Active { for _, summary := range c.summaryAny { if strings.Contains(item.Annotations.Get("summary"), summary) { foundCount++ @@ -1342,7 +1343,7 @@ func TestThresholdRuleNoData(t *testing.T) { reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true) rule, err := NewThresholdRule("69", &postableRule, fm, reader, true) - rule.temporalityMap = map[string]map[v3.Temporality]bool{ + rule.TemporalityMap = map[string]map[v3.Temporality]bool{ "signoz_calls_total": { v3.Delta: true, }, @@ -1357,7 +1358,7 @@ func TestThresholdRuleNoData(t *testing.T) { } assert.Equal(t, 1, retVal.(int), "case %d", idx) - for _, 
item := range rule.active { + for _, item := range rule.Active { if c.expectNoData { assert.True(t, strings.Contains(item.Labels.Get(labels.AlertNameLabel), "[No data]"), "case %d", idx) } else { @@ -1447,7 +1448,7 @@ func TestThresholdRuleTracesLink(t *testing.T) { reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true) rule, err := NewThresholdRule("69", &postableRule, fm, reader, true) - rule.temporalityMap = map[string]map[v3.Temporality]bool{ + rule.TemporalityMap = map[string]map[v3.Temporality]bool{ "signoz_calls_total": { v3.Delta: true, }, @@ -1465,7 +1466,7 @@ func TestThresholdRuleTracesLink(t *testing.T) { assert.Equal(t, 0, retVal.(int), "case %d", idx) } else { assert.Equal(t, c.expectAlerts, retVal.(int), "case %d", idx) - for _, item := range rule.active { + for _, item := range rule.Active { for name, value := range item.Annotations.Map() { if name == "related_traces" { assert.NotEmpty(t, value, "case %d", idx) @@ -1572,7 +1573,7 @@ func TestThresholdRuleLogsLink(t *testing.T) { reader := clickhouseReader.NewReaderFromClickhouseConnection(mock, options, nil, "", fm, "", true) rule, err := NewThresholdRule("69", &postableRule, fm, reader, true) - rule.temporalityMap = map[string]map[v3.Temporality]bool{ + rule.TemporalityMap = map[string]map[v3.Temporality]bool{ "signoz_calls_total": { v3.Delta: true, }, @@ -1590,7 +1591,7 @@ func TestThresholdRuleLogsLink(t *testing.T) { assert.Equal(t, 0, retVal.(int), "case %d", idx) } else { assert.Equal(t, c.expectAlerts, retVal.(int), "case %d", idx) - for _, item := range rule.active { + for _, item := range rule.Active { for name, value := range item.Annotations.Map() { if name == "related_logs" { assert.NotEmpty(t, value, "case %d", idx) From b49ed913c754a2c4c010c266d6833ae6a0797a15 Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Tue, 24 Sep 2024 10:56:49 +0530 Subject: [PATCH 65/79] chore: handle error before using task (#6055) --- pkg/query-service/rules/manager.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/query-service/rules/manager.go b/pkg/query-service/rules/manager.go index 09eb7ad367..75b2b5fade 100644 --- a/pkg/query-service/rules/manager.go +++ b/pkg/query-service/rules/manager.go @@ -476,15 +476,15 @@ func (m *Manager) addTask(rule *PostableRule, taskName string) error { UseLogsNewSchema: m.opts.UseLogsNewSchema, }) - for _, r := range newTask.Rules() { - m.rules[r.ID()] = r - } - if err != nil { zap.L().Error("creating rule task failed", zap.String("name", taskName), zap.Error(err)) return errors.New("error loading rules, previous rule set restored") } + for _, r := range newTask.Rules() { + m.rules[r.ID()] = r + } + // If there is an another task with the same identifier, raise an error _, ok := m.tasks[taskName] if ok { From 0feab5aa93df69d4a20452a932fab504e2351638 Mon Sep 17 00:00:00 2001 From: Kobe Cai Date: Tue, 24 Sep 2024 13:50:45 +0800 Subject: [PATCH 66/79] fix: check alert rule queries are all disabled if at least one query is set (#5966) --- pkg/query-service/rules/api_params.go | 46 +++++++++++- pkg/query-service/rules/api_params_test.go | 86 ++++++++++++++++++++++ 2 files changed, 131 insertions(+), 1 deletion(-) create mode 100644 pkg/query-service/rules/api_params_test.go diff --git a/pkg/query-service/rules/api_params.go b/pkg/query-service/rules/api_params.go index 77a1552946..b3c174b147 100644 --- a/pkg/query-service/rules/api_params.go +++ b/pkg/query-service/rules/api_params.go @@ -170,18 +170,62 @@ func 
isValidLabelValue(v string) bool {
 	return utf8.ValidString(v)
 }
 
+func isAllQueriesDisabled(compositeQuery *v3.CompositeQuery) bool {
+	if compositeQuery == nil {
+		return false
+	}
+	if compositeQuery.BuilderQueries == nil && compositeQuery.PromQueries == nil && compositeQuery.ClickHouseQueries == nil {
+		return false
+	}
+	switch compositeQuery.QueryType {
+	case v3.QueryTypeBuilder:
+		if len(compositeQuery.BuilderQueries) == 0 {
+			return false
+		}
+		for _, query := range compositeQuery.BuilderQueries {
+			if !query.Disabled {
+				return false
+			}
+		}
+	case v3.QueryTypePromQL:
+		if len(compositeQuery.PromQueries) == 0 {
+			return false
+		}
+		for _, query := range compositeQuery.PromQueries {
+			if !query.Disabled {
+				return false
+			}
+		}
+	case v3.QueryTypeClickHouseSQL:
+		if len(compositeQuery.ClickHouseQueries) == 0 {
+			return false
+		}
+		for _, query := range compositeQuery.ClickHouseQueries {
+			if !query.Disabled {
+				return false
+			}
+		}
+	}
+	return true
+}
+
 func (r *PostableRule) Validate() error {
 
 	var errs []error
 
 	if r.RuleCondition == nil {
-		errs = append(errs, errors.Errorf("rule condition is required"))
+		// accessing CompositeQuery below would panic, so return early
+		return errors.Errorf("rule condition is required")
 	} else {
 		if r.RuleCondition.CompositeQuery == nil {
 			errs = append(errs, errors.Errorf("composite metric query is required"))
 		}
 	}
 
+	if isAllQueriesDisabled(r.RuleCondition.CompositeQuery) {
+		errs = append(errs, errors.Errorf("all queries are disabled in rule condition"))
+	}
+
 	if r.RuleType == RuleTypeThreshold {
 		if r.RuleCondition.Target == nil {
 			errs = append(errs, errors.Errorf("rule condition missing the threshold"))
diff --git a/pkg/query-service/rules/api_params_test.go b/pkg/query-service/rules/api_params_test.go
new file mode 100644
index 0000000000..6a1245d0fe
--- /dev/null
+++ b/pkg/query-service/rules/api_params_test.go
@@ -0,0 +1,86 @@
+package rules
+
+import (
+	"testing"
+
+	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+)
+
+func TestIsAllQueriesDisabled(t *testing.T) {
+	testCases := []*v3.CompositeQuery{
+		&v3.CompositeQuery{
+			BuilderQueries: map[string]*v3.BuilderQuery{
+				"query1": {
+					Disabled: true,
+				},
+				"query2": {
+					Disabled: true,
+				},
+			},
+			QueryType: v3.QueryTypeBuilder,
+		},
+		nil,
+		&v3.CompositeQuery{
+			QueryType: v3.QueryTypeBuilder,
+		},
+		&v3.CompositeQuery{
+			QueryType: v3.QueryTypeBuilder,
+			BuilderQueries: map[string]*v3.BuilderQuery{
+				"query1": {
+					Disabled: true,
+				},
+				"query2": {
+					Disabled: false,
+				},
+			},
+		},
+		&v3.CompositeQuery{
+			QueryType: v3.QueryTypePromQL,
+		},
+		&v3.CompositeQuery{
+			QueryType: v3.QueryTypePromQL,
+			PromQueries: map[string]*v3.PromQuery{
+				"query3": {
+					Disabled: false,
+				},
+			},
+		},
+		&v3.CompositeQuery{
+			QueryType: v3.QueryTypePromQL,
+			PromQueries: map[string]*v3.PromQuery{
+				"query3": {
+					Disabled: true,
+				},
+			},
+		},
+		&v3.CompositeQuery{
+			QueryType: v3.QueryTypeClickHouseSQL,
+		},
+		&v3.CompositeQuery{
+			QueryType: v3.QueryTypeClickHouseSQL,
+			ClickHouseQueries: map[string]*v3.ClickHouseQuery{
+				"query4": {
+					Disabled: false,
+				},
+			},
+		},
+		&v3.CompositeQuery{
+			QueryType: v3.QueryTypeClickHouseSQL,
+			ClickHouseQueries: map[string]*v3.ClickHouseQuery{
+				"query4": {
+					Disabled: true,
+				},
+			},
+		},
+	}
+
+	expectedResult := []bool{true, false, false, false, false, false, true, false, false, true}
+
+	for index, compositeQuery := range testCases {
+		expected := expectedResult[index]
+		actual := isAllQueriesDisabled(compositeQuery)
+		if actual != expected {
+			
t.Errorf("Expected %v, but got %v", expected, actual) + } + } +} From 708158f50f41e202cf3ce46546ebd4197a296d2b Mon Sep 17 00:00:00 2001 From: Raj Kamal Singh <1133322+raj-k-singh@users.noreply.github.com> Date: Wed, 25 Sep 2024 09:51:16 +0530 Subject: [PATCH 67/79] chore: move clickhousereader filter suggestion methods to dedicated file (#6061) --- .../clickhouseReader/filter_suggestions.go | 270 ++++++++++++++++++ .../app/clickhouseReader/reader.go | 257 ----------------- 2 files changed, 270 insertions(+), 257 deletions(-) create mode 100644 pkg/query-service/app/clickhouseReader/filter_suggestions.go diff --git a/pkg/query-service/app/clickhouseReader/filter_suggestions.go b/pkg/query-service/app/clickhouseReader/filter_suggestions.go new file mode 100644 index 0000000000..1b80ccbef4 --- /dev/null +++ b/pkg/query-service/app/clickhouseReader/filter_suggestions.go @@ -0,0 +1,270 @@ +// Clickhouse reader methods for powering QB filter suggestions +package clickhouseReader + +import ( + "context" + "database/sql" + "fmt" + "slices" + "strings" + + "go.signoz.io/signoz/pkg/query-service/model" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.uber.org/zap" +) + +func (r *ClickHouseReader) GetQBFilterSuggestionsForLogs( + ctx context.Context, + req *v3.QBFilterSuggestionsRequest, +) (*v3.QBFilterSuggestionsResponse, *model.ApiError) { + suggestions := v3.QBFilterSuggestionsResponse{ + AttributeKeys: []v3.AttributeKey{}, + ExampleQueries: []v3.FilterSet{}, + } + + // Use existing autocomplete logic for generating attribute suggestions + attribKeysResp, err := r.GetLogAttributeKeys( + ctx, &v3.FilterAttributeKeyRequest{ + SearchText: req.SearchText, + DataSource: v3.DataSourceLogs, + Limit: int(req.AttributesLimit), + }) + if err != nil { + return nil, model.InternalError(fmt.Errorf("couldn't get attribute keys: %w", err)) + } + + suggestions.AttributeKeys = attribKeysResp.AttributeKeys + + // Rank suggested attributes + slices.SortFunc(suggestions.AttributeKeys, func(a v3.AttributeKey, b v3.AttributeKey) int { + + // Higher score => higher rank + attribKeyScore := func(a v3.AttributeKey) int { + + // Scoring criteria is expected to get more sophisticated in follow up changes + if a.Type == v3.AttributeKeyTypeResource { + return 2 + } + + if a.Type == v3.AttributeKeyTypeTag { + return 1 + } + + return 0 + } + + // To sort in descending order of score the return value must be negative when a > b + return attribKeyScore(b) - attribKeyScore(a) + }) + + // Put together suggested example queries. + + newExampleQuery := func() v3.FilterSet { + // Include existing filter in example query if specified. 
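+		// e.g. if the user has already typed `service.name = "checkout"`, each
+		// suggested example below extends that filter with one more
+		// `<suggested-attribute> = <seen-value>` item instead of replacing it.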
+ if req.ExistingFilter != nil { + return *req.ExistingFilter + } + + return v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + } + } + + // Suggest example queries for top suggested log attributes and resource attributes + exampleAttribs := []v3.AttributeKey{} + for _, attrib := range suggestions.AttributeKeys { + isAttributeOrResource := slices.Contains([]v3.AttributeKeyType{ + v3.AttributeKeyTypeResource, v3.AttributeKeyTypeTag, + }, attrib.Type) + + isNumOrStringType := slices.Contains([]v3.AttributeKeyDataType{ + v3.AttributeKeyDataTypeInt64, v3.AttributeKeyDataTypeFloat64, v3.AttributeKeyDataTypeString, + }, attrib.DataType) + + if isAttributeOrResource && isNumOrStringType { + exampleAttribs = append(exampleAttribs, attrib) + } + + if len(exampleAttribs) >= int(req.ExamplesLimit) { + break + } + } + + if len(exampleAttribs) > 0 { + exampleAttribValues, err := r.getValuesForLogAttributes( + ctx, exampleAttribs, req.ExamplesLimit, + ) + if err != nil { + // Do not fail the entire request if only example query generation fails + zap.L().Error("could not find attribute values for creating example query", zap.Error(err)) + } else { + + // add example queries for as many attributes as possible. + // suggest 1st value for 1st attrib, followed by 1st value for second attrib and so on + // and if there is still room, suggest 2nd value for 1st attrib, 2nd value for 2nd attrib and so on + for valueIdx := 0; valueIdx < int(req.ExamplesLimit); valueIdx++ { + for attrIdx, attr := range exampleAttribs { + needMoreExamples := len(suggestions.ExampleQueries) < int(req.ExamplesLimit) + + if needMoreExamples && valueIdx < len(exampleAttribValues[attrIdx]) { + exampleQuery := newExampleQuery() + exampleQuery.Items = append(exampleQuery.Items, v3.FilterItem{ + Key: attr, + Operator: "=", + Value: exampleAttribValues[attrIdx][valueIdx], + }) + + suggestions.ExampleQueries = append( + suggestions.ExampleQueries, exampleQuery, + ) + } + } + } + } + } + + // Suggest static example queries for standard log attributes if needed. + if len(suggestions.ExampleQueries) < int(req.ExamplesLimit) { + exampleQuery := newExampleQuery() + exampleQuery.Items = append(exampleQuery.Items, v3.FilterItem{ + Key: v3.AttributeKey{ + Key: "body", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeUnspecified, + IsColumn: true, + }, + Operator: "contains", + Value: "error", + }) + suggestions.ExampleQueries = append(suggestions.ExampleQueries, exampleQuery) + } + + return &suggestions, nil +} + +// Get up to `limit` values seen for each attribute in `attributes` +// Returns a slice of slices where the ith slice has values for ith entry in `attributes` +func (r *ClickHouseReader) getValuesForLogAttributes( + ctx context.Context, attributes []v3.AttributeKey, limit uint64, +) ([][]any, *model.ApiError) { + /* + The query used here needs to be as cheap as possible, and while uncommon, it is possible for + a tag to have 100s of millions of values (eg: message, request_id) + + Construct a query to UNION the result of querying first `limit` values for each attribute. 
For example: + ``` + select * from ( + ( + select tagKey, stringTagValue, int64TagValue, float64TagValue + from signoz_logs.distributed_tag_attributes + where tagKey = $1 and ( + stringTagValue != '' or int64TagValue is not null or float64TagValue is not null + ) + limit 2 + ) UNION DISTINCT ( + select tagKey, stringTagValue, int64TagValue, float64TagValue + from signoz_logs.distributed_tag_attributes + where tagKey = $2 and ( + stringTagValue != '' or int64TagValue is not null or float64TagValue is not null + ) + limit 2 + ) + ) settings max_threads=2 + ``` + Since tag_attributes table uses ReplacingMergeTree, the values would be distinct and no order by + is being used to ensure the `limit` clause minimizes the amount of data scanned. + + This query scanned ~30k rows per attribute on fiscalnote-v2 for attributes like `message` and `time` + that had >~110M values each + */ + + if len(attributes) > 10 { + zap.L().Error( + "log attribute values requested for too many attributes. This can lead to slow and costly queries", + zap.Int("count", len(attributes)), + ) + attributes = attributes[:10] + } + + tagQueries := []string{} + tagKeyQueryArgs := []any{} + for idx, attrib := range attributes { + tagQueries = append(tagQueries, fmt.Sprintf(`( + select tagKey, stringTagValue, int64TagValue, float64TagValue + from %s.%s + where tagKey = $%d and ( + stringTagValue != '' or int64TagValue is not null or float64TagValue is not null + ) + limit %d + )`, r.logsDB, r.logsTagAttributeTable, idx+1, limit)) + + tagKeyQueryArgs = append(tagKeyQueryArgs, attrib.Key) + } + + query := fmt.Sprintf(`select * from ( + %s + ) settings max_threads=2`, strings.Join(tagQueries, " UNION DISTINCT ")) + + rows, err := r.db.Query(ctx, query, tagKeyQueryArgs...) + if err != nil { + zap.L().Error("couldn't query attrib values for suggestions", zap.Error(err)) + return nil, model.InternalError(fmt.Errorf( + "couldn't query attrib values for suggestions: %w", err, + )) + } + defer rows.Close() + + result := make([][]any, len(attributes)) + + // Helper for getting hold of the result slice to append to for each scanned row + resultIdxForAttrib := func(key string, dataType v3.AttributeKeyDataType) int { + return slices.IndexFunc(attributes, func(attrib v3.AttributeKey) bool { + return attrib.Key == key && attrib.DataType == dataType + }) + } + + // Scan rows and append to result + for rows.Next() { + var tagKey string + var stringValue string + var float64Value sql.NullFloat64 + var int64Value sql.NullInt64 + + err := rows.Scan( + &tagKey, &stringValue, &int64Value, &float64Value, + ) + if err != nil { + return nil, model.InternalError(fmt.Errorf( + "couldn't scan attrib value rows: %w", err, + )) + } + + if len(stringValue) > 0 { + attrResultIdx := resultIdxForAttrib(tagKey, v3.AttributeKeyDataTypeString) + if attrResultIdx >= 0 { + result[attrResultIdx] = append(result[attrResultIdx], stringValue) + } + + } else if int64Value.Valid { + attrResultIdx := resultIdxForAttrib(tagKey, v3.AttributeKeyDataTypeInt64) + if attrResultIdx >= 0 { + result[attrResultIdx] = append(result[attrResultIdx], int64Value.Int64) + } + + } else if float64Value.Valid { + attrResultIdx := resultIdxForAttrib(tagKey, v3.AttributeKeyDataTypeFloat64) + if attrResultIdx >= 0 { + result[attrResultIdx] = append(result[attrResultIdx], float64Value.Float64) + } + } + } + + if err := rows.Err(); err != nil { + return nil, model.InternalError(fmt.Errorf( + "couldn't scan attrib value rows: %w", err, + )) + } + + return result, nil +} diff --git 
a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index 8dd29856fb..b3aee7ca60 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -10,7 +10,6 @@ import ( "os" "reflect" "regexp" - "slices" "sort" "strconv" "strings" @@ -4069,262 +4068,6 @@ func (r *ClickHouseReader) GetLogAttributeValues(ctx context.Context, req *v3.Fi } -func (r *ClickHouseReader) GetQBFilterSuggestionsForLogs( - ctx context.Context, - req *v3.QBFilterSuggestionsRequest, -) (*v3.QBFilterSuggestionsResponse, *model.ApiError) { - suggestions := v3.QBFilterSuggestionsResponse{ - AttributeKeys: []v3.AttributeKey{}, - ExampleQueries: []v3.FilterSet{}, - } - - // Use existing autocomplete logic for generating attribute suggestions - attribKeysResp, err := r.GetLogAttributeKeys( - ctx, &v3.FilterAttributeKeyRequest{ - SearchText: req.SearchText, - DataSource: v3.DataSourceLogs, - Limit: int(req.AttributesLimit), - }) - if err != nil { - return nil, model.InternalError(fmt.Errorf("couldn't get attribute keys: %w", err)) - } - - suggestions.AttributeKeys = attribKeysResp.AttributeKeys - - // Rank suggested attributes - slices.SortFunc(suggestions.AttributeKeys, func(a v3.AttributeKey, b v3.AttributeKey) int { - - // Higher score => higher rank - attribKeyScore := func(a v3.AttributeKey) int { - - // Scoring criteria is expected to get more sophisticated in follow up changes - if a.Type == v3.AttributeKeyTypeResource { - return 2 - } - - if a.Type == v3.AttributeKeyTypeTag { - return 1 - } - - return 0 - } - - // To sort in descending order of score the return value must be negative when a > b - return attribKeyScore(b) - attribKeyScore(a) - }) - - // Put together suggested example queries. - - newExampleQuery := func() v3.FilterSet { - // Include existing filter in example query if specified. - if req.ExistingFilter != nil { - return *req.ExistingFilter - } - - return v3.FilterSet{ - Operator: "AND", - Items: []v3.FilterItem{}, - } - } - - // Suggest example queries for top suggested log attributes and resource attributes - exampleAttribs := []v3.AttributeKey{} - for _, attrib := range suggestions.AttributeKeys { - isAttributeOrResource := slices.Contains([]v3.AttributeKeyType{ - v3.AttributeKeyTypeResource, v3.AttributeKeyTypeTag, - }, attrib.Type) - - isNumOrStringType := slices.Contains([]v3.AttributeKeyDataType{ - v3.AttributeKeyDataTypeInt64, v3.AttributeKeyDataTypeFloat64, v3.AttributeKeyDataTypeString, - }, attrib.DataType) - - if isAttributeOrResource && isNumOrStringType { - exampleAttribs = append(exampleAttribs, attrib) - } - - if len(exampleAttribs) >= int(req.ExamplesLimit) { - break - } - } - - if len(exampleAttribs) > 0 { - exampleAttribValues, err := r.getValuesForLogAttributes( - ctx, exampleAttribs, req.ExamplesLimit, - ) - if err != nil { - // Do not fail the entire request if only example query generation fails - zap.L().Error("could not find attribute values for creating example query", zap.Error(err)) - } else { - - // add example queries for as many attributes as possible. 
- // suggest 1st value for 1st attrib, followed by 1st value for second attrib and so on - // and if there is still room, suggest 2nd value for 1st attrib, 2nd value for 2nd attrib and so on - for valueIdx := 0; valueIdx < int(req.ExamplesLimit); valueIdx++ { - for attrIdx, attr := range exampleAttribs { - needMoreExamples := len(suggestions.ExampleQueries) < int(req.ExamplesLimit) - - if needMoreExamples && valueIdx < len(exampleAttribValues[attrIdx]) { - exampleQuery := newExampleQuery() - exampleQuery.Items = append(exampleQuery.Items, v3.FilterItem{ - Key: attr, - Operator: "=", - Value: exampleAttribValues[attrIdx][valueIdx], - }) - - suggestions.ExampleQueries = append( - suggestions.ExampleQueries, exampleQuery, - ) - } - } - } - } - } - - // Suggest static example queries for standard log attributes if needed. - if len(suggestions.ExampleQueries) < int(req.ExamplesLimit) { - exampleQuery := newExampleQuery() - exampleQuery.Items = append(exampleQuery.Items, v3.FilterItem{ - Key: v3.AttributeKey{ - Key: "body", - DataType: v3.AttributeKeyDataTypeString, - Type: v3.AttributeKeyTypeUnspecified, - IsColumn: true, - }, - Operator: "contains", - Value: "error", - }) - suggestions.ExampleQueries = append(suggestions.ExampleQueries, exampleQuery) - } - - return &suggestions, nil -} - -// Get up to `limit` values seen for each attribute in `attributes` -// Returns a slice of slices where the ith slice has values for ith entry in `attributes` -func (r *ClickHouseReader) getValuesForLogAttributes( - ctx context.Context, attributes []v3.AttributeKey, limit uint64, -) ([][]any, *model.ApiError) { - /* - The query used here needs to be as cheap as possible, and while uncommon, it is possible for - a tag to have 100s of millions of values (eg: message, request_id) - - Construct a query to UNION the result of querying first `limit` values for each attribute. For example: - ``` - select * from ( - ( - select tagKey, stringTagValue, int64TagValue, float64TagValue - from signoz_logs.distributed_tag_attributes - where tagKey = $1 and ( - stringTagValue != '' or int64TagValue is not null or float64TagValue is not null - ) - limit 2 - ) UNION DISTINCT ( - select tagKey, stringTagValue, int64TagValue, float64TagValue - from signoz_logs.distributed_tag_attributes - where tagKey = $2 and ( - stringTagValue != '' or int64TagValue is not null or float64TagValue is not null - ) - limit 2 - ) - ) settings max_threads=2 - ``` - Since tag_attributes table uses ReplacingMergeTree, the values would be distinct and no order by - is being used to ensure the `limit` clause minimizes the amount of data scanned. - - This query scanned ~30k rows per attribute on fiscalnote-v2 for attributes like `message` and `time` - that had >~110M values each - */ - - if len(attributes) > 10 { - zap.L().Error( - "log attribute values requested for too many attributes. 
This can lead to slow and costly queries", - zap.Int("count", len(attributes)), - ) - attributes = attributes[:10] - } - - tagQueries := []string{} - tagKeyQueryArgs := []any{} - for idx, attrib := range attributes { - tagQueries = append(tagQueries, fmt.Sprintf(`( - select tagKey, stringTagValue, int64TagValue, float64TagValue - from %s.%s - where tagKey = $%d and ( - stringTagValue != '' or int64TagValue is not null or float64TagValue is not null - ) - limit %d - )`, r.logsDB, r.logsTagAttributeTable, idx+1, limit)) - - tagKeyQueryArgs = append(tagKeyQueryArgs, attrib.Key) - } - - query := fmt.Sprintf(`select * from ( - %s - ) settings max_threads=2`, strings.Join(tagQueries, " UNION DISTINCT ")) - - rows, err := r.db.Query(ctx, query, tagKeyQueryArgs...) - if err != nil { - zap.L().Error("couldn't query attrib values for suggestions", zap.Error(err)) - return nil, model.InternalError(fmt.Errorf( - "couldn't query attrib values for suggestions: %w", err, - )) - } - defer rows.Close() - - result := make([][]any, len(attributes)) - - // Helper for getting hold of the result slice to append to for each scanned row - resultIdxForAttrib := func(key string, dataType v3.AttributeKeyDataType) int { - return slices.IndexFunc(attributes, func(attrib v3.AttributeKey) bool { - return attrib.Key == key && attrib.DataType == dataType - }) - } - - // Scan rows and append to result - for rows.Next() { - var tagKey string - var stringValue string - var float64Value sql.NullFloat64 - var int64Value sql.NullInt64 - - err := rows.Scan( - &tagKey, &stringValue, &int64Value, &float64Value, - ) - if err != nil { - return nil, model.InternalError(fmt.Errorf( - "couldn't scan attrib value rows: %w", err, - )) - } - - if len(stringValue) > 0 { - attrResultIdx := resultIdxForAttrib(tagKey, v3.AttributeKeyDataTypeString) - if attrResultIdx >= 0 { - result[attrResultIdx] = append(result[attrResultIdx], stringValue) - } - - } else if int64Value.Valid { - attrResultIdx := resultIdxForAttrib(tagKey, v3.AttributeKeyDataTypeInt64) - if attrResultIdx >= 0 { - result[attrResultIdx] = append(result[attrResultIdx], int64Value.Int64) - } - - } else if float64Value.Valid { - attrResultIdx := resultIdxForAttrib(tagKey, v3.AttributeKeyDataTypeFloat64) - if attrResultIdx >= 0 { - result[attrResultIdx] = append(result[attrResultIdx], float64Value.Float64) - } - } - } - - if err := rows.Err(); err != nil { - return nil, model.InternalError(fmt.Errorf( - "couldn't scan attrib value rows: %w", err, - )) - } - - return result, nil -} - func readRow(vars []interface{}, columnNames []string, countOfNumberCols int) ([]string, map[string]string, []map[string]string, *v3.Point) { // Each row will have a value and a timestamp, and an optional list of label values // example: {Timestamp: ..., Value: ...} From 58d6487f779b3990954180103a1ae52c1f7f23d7 Mon Sep 17 00:00:00 2001 From: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com> Date: Wed, 25 Sep 2024 11:32:06 +0530 Subject: [PATCH 68/79] fix: fixed extra space at bottom for list and table panel (#6049) --- frontend/src/container/GridCardLayout/styles.ts | 9 ++++++++- .../LogsPanelTable/LogsPanelComponent.styles.scss | 2 ++ .../TracesTableComponent.styles.scss | 2 ++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/frontend/src/container/GridCardLayout/styles.ts b/frontend/src/container/GridCardLayout/styles.ts index e3f24308de..df2004da52 100644 --- a/frontend/src/container/GridCardLayout/styles.ts +++ b/frontend/src/container/GridCardLayout/styles.ts @@ -33,7 
+33,14 @@ export const Card = styled(CardComponent)` } .ant-card-body { - height: calc(100% - 30px); + ${({ $panelType }): StyledCSS => + $panelType === PANEL_TYPES.TABLE + ? css` + height: 100%; + ` + : css` + height: calc(100% - 30px); + `} padding: 0; } `; diff --git a/frontend/src/container/LogsPanelTable/LogsPanelComponent.styles.scss b/frontend/src/container/LogsPanelTable/LogsPanelComponent.styles.scss index 6317ea2134..b355c90551 100644 --- a/frontend/src/container/LogsPanelTable/LogsPanelComponent.styles.scss +++ b/frontend/src/container/LogsPanelTable/LogsPanelComponent.styles.scss @@ -63,6 +63,8 @@ height: 40px; justify-content: end; padding: 0 8px; + margin-top: 12px; + margin-bottom: 2px; } } diff --git a/frontend/src/container/TracesTableComponent/TracesTableComponent.styles.scss b/frontend/src/container/TracesTableComponent/TracesTableComponent.styles.scss index 74e80f8764..c59bf3c5ad 100644 --- a/frontend/src/container/TracesTableComponent/TracesTableComponent.styles.scss +++ b/frontend/src/container/TracesTableComponent/TracesTableComponent.styles.scss @@ -52,6 +52,8 @@ height: 40px; justify-content: end; padding: 0 8px; + margin-top: 12px; + margin-bottom: 2px; } } From 35f8e133a9e539d66203d7afa06192875ee7a64d Mon Sep 17 00:00:00 2001 From: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com> Date: Wed, 25 Sep 2024 11:32:19 +0530 Subject: [PATCH 69/79] fix: dashboard variable - ux and usability fixes (#6038) * fix: dashboard variable - ux and usability fixes * fix: separarted all option, fixed tooltip handling, added clear option etc --- .../DashboardVariableSelection.styles.scss | 22 ++++++++++------ .../VariableItem.tsx | 25 ++++++++++++------- 2 files changed, 30 insertions(+), 17 deletions(-) diff --git a/frontend/src/container/NewDashboard/DashboardVariablesSelection/DashboardVariableSelection.styles.scss b/frontend/src/container/NewDashboard/DashboardVariablesSelection/DashboardVariableSelection.styles.scss index f7fcb83a53..6df3e79906 100644 --- a/frontend/src/container/NewDashboard/DashboardVariablesSelection/DashboardVariableSelection.styles.scss +++ b/frontend/src/container/NewDashboard/DashboardVariablesSelection/DashboardVariableSelection.styles.scss @@ -43,6 +43,15 @@ .ant-select-item { display: flex; align-items: center; + gap: 8px; + } + + .rc-virtual-list-holder { + [data-testid='option-ALL'] { + border-bottom: 1px solid var(--bg-slate-400); + padding-bottom: 12px; + margin-bottom: 8px; + } } .all-label { @@ -56,28 +65,25 @@ } .dropdown-value { - display: flex; - justify-content: space-between; - align-items: center; + display: grid; + grid-template-columns: 1fr max-content; .option-text { - max-width: 180px; padding: 0 8px; } .toggle-tag-label { padding-left: 8px; right: 40px; - font-weight: normal; - position: absolute; + font-weight: 500; } } } } .dropdown-styles { - min-width: 300px; - max-width: 350px; + min-width: 400px; + max-width: 500px; } .lightMode { diff --git a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx index e14162d0ce..a0a444a715 100644 --- a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx +++ b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx @@ -62,14 +62,14 @@ interface VariableItemProps { const getSelectValue = ( selectedValue: IDashboardVariable['selectedValue'], variableData: IDashboardVariable, -): string | string[] => { +): string | 
string[] | undefined => { if (Array.isArray(selectedValue)) { if (!variableData.multiSelect && selectedValue.length === 1) { - return selectedValue[0]?.toString() || ''; + return selectedValue[0]?.toString(); } return selectedValue.map((item) => item.toString()); } - return selectedValue?.toString() || ''; + return selectedValue?.toString(); }; // eslint-disable-next-line sonarjs/cognitive-complexity @@ -300,7 +300,7 @@ function VariableItem({ e.stopPropagation(); e.preventDefault(); const isChecked = - variableData.allSelected || selectValue.includes(ALL_SELECT_VALUE); + variableData.allSelected || selectValue?.includes(ALL_SELECT_VALUE); if (isChecked) { handleChange([]); @@ -462,6 +462,7 @@ function VariableItem({ + {omittedValues.length} )} + allowClear > {enableSelectAll && ( @@ -500,11 +501,17 @@ function VariableItem({ {...retProps(option as string)} onClick={(e): void => handleToggle(e as any, option as string)} > - - - {option.toString()} - - + + {option.toString()} + {variableData.multiSelect && optionState.tag === option.toString() && From 55f653d92eefe5101f64e198c6b0d8c970541e40 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Wed, 25 Sep 2024 20:31:06 +0530 Subject: [PATCH 70/79] fix: added support for `body contains X` tag on pressing enter after selecting attribute key (#6059) * fix: added empty operator in the top to support body contains * fix: address review comments --- .../QueryBuilderSearchV2.tsx | 104 +++++++++++++++--- 1 file changed, 87 insertions(+), 17 deletions(-) diff --git a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.tsx b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.tsx index 3d3fca4654..0925c10d97 100644 --- a/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.tsx +++ b/frontend/src/container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2.tsx @@ -286,16 +286,62 @@ function QueryBuilderSearchV2( parsedValue = value; } if (currentState === DropdownState.ATTRIBUTE_KEY) { - setCurrentFilterItem((prev) => ({ - ...prev, - key: parsedValue as BaseAutocompleteData, - op: '', - value: '', - })); - setCurrentState(DropdownState.OPERATOR); - setSearchValue((parsedValue as BaseAutocompleteData)?.key); + // Case - convert abc def ghi type attribute keys directly to body contains abc def ghi + if ( + isObject(parsedValue) && + parsedValue?.key && + parsedValue?.key?.split(' ').length > 1 + ) { + setTags((prev) => [ + ...prev, + { + key: { + key: 'body', + dataType: DataTypes.String, + type: '', + isColumn: true, + isJSON: false, + // eslint-disable-next-line sonarjs/no-duplicate-string + id: 'body--string----true', + }, + op: OPERATORS.CONTAINS, + value: (parsedValue as BaseAutocompleteData)?.key, + }, + ]); + setCurrentFilterItem(undefined); + setSearchValue(''); + setCurrentState(DropdownState.ATTRIBUTE_KEY); + } else { + setCurrentFilterItem((prev) => ({ + ...prev, + key: parsedValue as BaseAutocompleteData, + op: '', + value: '', + })); + setCurrentState(DropdownState.OPERATOR); + setSearchValue((parsedValue as BaseAutocompleteData)?.key); + } } else if (currentState === DropdownState.OPERATOR) { - if (value === OPERATORS.EXISTS || value === OPERATORS.NOT_EXISTS) { + if (isEmpty(value) && currentFilterItem?.key?.key) { + setTags((prev) => [ + ...prev, + { + key: { + key: 'body', + dataType: DataTypes.String, + type: '', + isColumn: true, + isJSON: false, + id: 'body--string----true', + }, + op: OPERATORS.CONTAINS, + value: 
currentFilterItem?.key?.key, + }, + ]); + setCurrentFilterItem(undefined); + setSearchValue(''); + setCurrentState(DropdownState.ATTRIBUTE_KEY); + } else if (value === OPERATORS.EXISTS || value === OPERATORS.NOT_EXISTS) { setTags((prev) => [ ...prev, { @@ -399,6 +445,7 @@ function QueryBuilderSearchV2( whereClauseConfig?.customKey === 'body' && whereClauseConfig?.customOp === OPERATORS.CONTAINS ) { + // eslint-disable-next-line sonarjs/no-identical-functions setTags((prev) => [ ...prev, { @@ -519,19 +566,20 @@ function QueryBuilderSearchV2( setCurrentState(DropdownState.OPERATOR); } } - if (suggestionsData?.payload?.attributes?.length === 0) { + // again let's not auto select anything for the user + if (tagOperator) { setCurrentFilterItem({ key: { - key: tagKey.split(' ')[0], + key: tagKey, dataType: DataTypes.EMPTY, type: '', isColumn: false, isJSON: false, }, - op: '', + op: tagOperator, value: '', }); - setCurrentState(DropdownState.OPERATOR); + setCurrentState(DropdownState.ATTRIBUTE_VALUE); } } else if ( // Case 2 - if key is defined but the search text doesn't match with the set key, @@ -607,13 +655,32 @@ function QueryBuilderSearchV2( // the useEffect takes care of setting the dropdown values correctly on change of the current state useEffect(() => { if (currentState === DropdownState.ATTRIBUTE_KEY) { + const { tagKey } = getTagToken(searchValue); if (isLogsExplorerPage) { - setDropdownOptions( - suggestionsData?.payload?.attributes?.map((key) => ({ + // add the user typed option in the dropdown to select that and move ahead irrespective of the matches and all + setDropdownOptions([ + ...(!isEmpty(tagKey) && + !suggestionsData?.payload?.attributes?.some((val) => + isEqual(val.key, tagKey), + ) + ? [ + { + label: tagKey, + value: { + key: tagKey, + dataType: DataTypes.EMPTY, + type: '', + isColumn: false, + isJSON: false, + }, + }, + ] + : []), + ...(suggestionsData?.payload?.attributes?.map((key) => ({ label: key.key, value: key, - })) || [], - ); + })) || []), + ]); } else { setDropdownOptions( data?.payload?.attributeKeys?.map((key) => ({ @@ -643,12 +710,14 @@ function QueryBuilderSearchV2( op.label.startsWith(partialOperator.toLocaleUpperCase()), ); } + operatorOptions = [{ label: '', value: '' }, ...operatorOptions]; setDropdownOptions(operatorOptions); } else if (strippedKey.endsWith('[*]') && strippedKey.startsWith('body.')) { operatorOptions = [OPERATORS.HAS, OPERATORS.NHAS].map((operator) => ({ label: operator, value: operator, })); + operatorOptions = [{ label: '', value: '' }, ...operatorOptions]; setDropdownOptions(operatorOptions); } else { operatorOptions = QUERY_BUILDER_OPERATORS_BY_TYPES.universal.map( @@ -663,6 +732,7 @@ function QueryBuilderSearchV2( op.label.startsWith(partialOperator.toLocaleUpperCase()), ); } + operatorOptions = [{ label: '', value: '' }, ...operatorOptions]; setDropdownOptions(operatorOptions); } } From fc8391c5aabc75146ad145065920cd3e65f72d8d Mon Sep 17 00:00:00 2001 From: Raj Kamal Singh <1133322+raj-k-singh@users.noreply.github.com> Date: Thu, 26 Sep 2024 15:26:14 +0530 Subject: [PATCH 71/79] Feat: logs filter suggestions higher rank for special resource attribs like service.name and env etc (#6060) * chore: upgrade signoz-otel-collector dependency to v0.102.10 * feat: first stab at ranking resource attribs higher * chore: add test todo for validating resource attribs get ranked higher in logs filter suggestions * chore: add test validating higher ranking for special resource attribs * chore: some cleanup * chore: some more cleanup --- go.mod 
| 2 +- go.sum | 2 + .../clickhouseReader/filter_suggestions.go | 78 ++++++++++++++----- .../integration/filter_suggestions_test.go | 56 +++++++++++++ 4 files changed, 117 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index 9d61916d42..6f523045d1 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/ClickHouse/clickhouse-go/v2 v2.23.2 github.com/DATA-DOG/go-sqlmock v1.5.2 github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd - github.com/SigNoz/signoz-otel-collector v0.102.2 + github.com/SigNoz/signoz-otel-collector v0.102.10 github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974 github.com/antonmedv/expr v1.15.3 diff --git a/go.sum b/go.sum index a442200b0e..c557bc6f87 100644 --- a/go.sum +++ b/go.sum @@ -66,6 +66,8 @@ github.com/SigNoz/prometheus v1.11.1 h1:roM8ugYf4UxaeKKujEeBvoX7ybq3IrS+TB26KiRt github.com/SigNoz/prometheus v1.11.1/go.mod h1:uv4mQwZQtx7y4GQ6EdHOi8Wsk07uHNn2XHd1zM85m6I= github.com/SigNoz/signoz-otel-collector v0.102.2 h1:SmjsBZjMjTVVpuOlfJXlsDJQbdefQP/9Wz3CyzSuZuU= github.com/SigNoz/signoz-otel-collector v0.102.2/go.mod h1:ISAXYhZenojCWg6CdDJtPMpfS6Zwc08+uoxH25tc6Y0= +github.com/SigNoz/signoz-otel-collector v0.102.10 h1:1zjU31OcRZL6fS0IIag8LA8bdhP4S28dzovDwuOg7Lg= +github.com/SigNoz/signoz-otel-collector v0.102.10/go.mod h1:APoBVD4aRu9vIny1vdzZSi2wPY3elyjHA/I/rh1hKfs= github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc= github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo= github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY= diff --git a/pkg/query-service/app/clickhouseReader/filter_suggestions.go b/pkg/query-service/app/clickhouseReader/filter_suggestions.go index 1b80ccbef4..fe78fc5555 100644 --- a/pkg/query-service/app/clickhouseReader/filter_suggestions.go +++ b/pkg/query-service/app/clickhouseReader/filter_suggestions.go @@ -8,6 +8,7 @@ import ( "slices" "strings" + "github.com/SigNoz/signoz-otel-collector/exporter/clickhouselogsexporter/logsv2" "go.signoz.io/signoz/pkg/query-service/model" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.uber.org/zap" @@ -36,26 +37,7 @@ func (r *ClickHouseReader) GetQBFilterSuggestionsForLogs( suggestions.AttributeKeys = attribKeysResp.AttributeKeys // Rank suggested attributes - slices.SortFunc(suggestions.AttributeKeys, func(a v3.AttributeKey, b v3.AttributeKey) int { - - // Higher score => higher rank - attribKeyScore := func(a v3.AttributeKey) int { - - // Scoring criteria is expected to get more sophisticated in follow up changes - if a.Type == v3.AttributeKeyTypeResource { - return 2 - } - - if a.Type == v3.AttributeKeyTypeTag { - return 1 - } - - return 0 - } - - // To sort in descending order of score the return value must be negative when a > b - return attribKeyScore(b) - attribKeyScore(a) - }) + attribRanker.sort(suggestions.AttributeKeys) // Put together suggested example queries. @@ -268,3 +250,59 @@ func (r *ClickHouseReader) getValuesForLogAttributes( return result, nil } + +var attribRanker = newRankingStrategy() + +func newRankingStrategy() attribRankingStrategy { + // Some special resource attributes should get ranked above all others. 
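+	// Descending rank order: these well-known attributes first, then their
+	// synonyms, then other resource attributes, then tag attributes (see
+	// the score function below).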
+ interestingResourceAttrsInDescRank := []string{ + "service", "service.name", "env", "k8s.namespace.name", + } + + // Synonyms of interesting attributes should come next + resourceHierarchy := logsv2.ResourceHierarchy() + for _, attr := range []string{ + "service.name", + "deployment.environment", + "k8s.namespace.name", + "k8s.pod.name", + "k8s.container.name", + "k8s.node.name", + } { + interestingResourceAttrsInDescRank = append( + interestingResourceAttrsInDescRank, resourceHierarchy.Synonyms(attr)..., + ) + } + + interestingResourceAttrsInAscRank := interestingResourceAttrsInDescRank[:] + slices.Reverse(interestingResourceAttrsInAscRank) + + return attribRankingStrategy{ + interestingResourceAttrsInAscRank: interestingResourceAttrsInAscRank, + } +} + +type attribRankingStrategy struct { + interestingResourceAttrsInAscRank []string +} + +// The higher the score, the higher the rank +func (s *attribRankingStrategy) score(attrib v3.AttributeKey) int { + if attrib.Type == v3.AttributeKeyTypeResource { + // 3 + (-1) if attrib.Key is not an interesting resource attribute + return 3 + slices.Index(s.interestingResourceAttrsInAscRank, attrib.Key) + } + + if attrib.Type == v3.AttributeKeyTypeTag { + return 1 + } + + return 0 +} + +func (s *attribRankingStrategy) sort(attribKeys []v3.AttributeKey) { + slices.SortFunc(attribKeys, func(a v3.AttributeKey, b v3.AttributeKey) int { + // To sort in descending order of score the return value must be negative when a > b + return s.score(b) - s.score(a) + }) +} diff --git a/pkg/query-service/tests/integration/filter_suggestions_test.go b/pkg/query-service/tests/integration/filter_suggestions_test.go index a1f56115c5..6c8224be50 100644 --- a/pkg/query-service/tests/integration/filter_suggestions_test.go +++ b/pkg/query-service/tests/integration/filter_suggestions_test.go @@ -138,6 +138,62 @@ func TestLogsFilterSuggestionsWithExistingFilter(t *testing.T) { } } +func TestResourceAttribsRankedHigherInLogsFilterSuggestions(t *testing.T) { + require := require.New(t) + + tagKeys := []v3.AttributeKey{} + for _, k := range []string{"user_id", "user_email"} { + tagKeys = append(tagKeys, v3.AttributeKey{ + Key: k, + Type: v3.AttributeKeyTypeTag, + DataType: v3.AttributeKeyDataTypeString, + IsColumn: false, + }) + } + + specialResourceAttrKeys := []v3.AttributeKey{} + for _, k := range []string{"service", "env"} { + specialResourceAttrKeys = append(specialResourceAttrKeys, v3.AttributeKey{ + Key: k, + Type: v3.AttributeKeyTypeResource, + DataType: v3.AttributeKeyDataTypeString, + IsColumn: false, + }) + } + + otherResourceAttrKeys := []v3.AttributeKey{} + for _, k := range []string{"container_name", "container_id"} { + otherResourceAttrKeys = append(otherResourceAttrKeys, v3.AttributeKey{ + Key: k, + Type: v3.AttributeKeyTypeResource, + DataType: v3.AttributeKeyDataTypeString, + IsColumn: false, + }) + } + + tb := NewFilterSuggestionsTestBed(t) + + mockAttrKeysInDB := append(tagKeys, otherResourceAttrKeys...) + mockAttrKeysInDB = append(mockAttrKeysInDB, specialResourceAttrKeys...) + + tb.mockAttribKeysQueryResponse(mockAttrKeysInDB) + + expectedTopSuggestions := append(specialResourceAttrKeys, otherResourceAttrKeys...) + expectedTopSuggestions = append(expectedTopSuggestions, tagKeys...) 
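+	// expectedTopSuggestions mirrors the ranking strategy: special resource
+	// attributes first, then other resource attributes, then tag attributes.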
+ + tb.mockAttribValuesQueryResponse( + expectedTopSuggestions[:2], [][]string{{"test"}, {"test"}}, + ) + + suggestionsQueryParams := map[string]string{"examplesLimit": "2"} + suggestionsResp := tb.GetQBFilterSuggestionsForLogs(suggestionsQueryParams) + + require.Equal( + expectedTopSuggestions, + suggestionsResp.AttributeKeys[:len(expectedTopSuggestions)], + ) +} + // Mocks response for CH queries made by reader.GetLogAttributeKeys func (tb *FilterSuggestionsTestBed) mockAttribKeysQueryResponse( attribsToReturn []v3.AttributeKey, From 6e3141a4ce28ffb5d4e2c763f2dad9aa3935e546 Mon Sep 17 00:00:00 2001 From: rahulkeswani101 Date: Thu, 26 Sep 2024 16:12:49 +0530 Subject: [PATCH 72/79] feat: added blur event to having input in query section (#5684) * feat: added blur event to having input in query section * feat: added a error message for incomplete having clause and improved handleBlur * feat: added focus event to remove error message --------- Co-authored-by: Srikanth Chekuri --- .../filters/HavingFilter/HavingFilter.tsx | 79 +++++++++++++------ 1 file changed, 56 insertions(+), 23 deletions(-) diff --git a/frontend/src/container/QueryBuilder/filters/HavingFilter/HavingFilter.tsx b/frontend/src/container/QueryBuilder/filters/HavingFilter/HavingFilter.tsx index 7d11d018cc..3eab3e50ee 100644 --- a/frontend/src/container/QueryBuilder/filters/HavingFilter/HavingFilter.tsx +++ b/frontend/src/container/QueryBuilder/filters/HavingFilter/HavingFilter.tsx @@ -1,3 +1,4 @@ +import { Color } from '@signozhq/design-tokens'; import { Select } from 'antd'; import { ENTITY_VERSION_V4 } from 'constants/app'; // ** Constants @@ -34,6 +35,7 @@ export function HavingFilter({ const [currentFormValue, setCurrentFormValue] = useState( initialHavingValues, ); + const [errorMessage, setErrorMessage] = useState(null); const { isMulti } = useTagValidation( currentFormValue.op, @@ -198,6 +200,29 @@ export function HavingFilter({ resetChanges(); }; + const handleFocus = useCallback(() => { + setErrorMessage(null); + }, []); + + const handleBlur = useCallback((): void => { + if (searchText) { + const { columnName, op, value } = getHavingObject(searchText); + const isCompleteHavingClause = + columnName && op && value.every((v) => v !== ''); + + if (isCompleteHavingClause && isValidHavingValue(searchText)) { + setLocalValues((prev) => { + const updatedValues = [...prev, searchText]; + onChange(updatedValues.map(transformFromStringToHaving)); + return updatedValues; + }); + setSearchText(''); + } else { + setErrorMessage('Invalid HAVING clause'); + } + } + }, [searchText, onChange]); + useEffect(() => { parseSearchText(searchText); }, [searchText, parseSearchText]); @@ -209,28 +234,36 @@ export function HavingFilter({ const isMetricsDataSource = query.dataSource === DataSource.METRICS; return ( - + <> + + {errorMessage && ( +
{errorMessage}
+ )} + ); } From 7a125e31ecf8c9733370ac62d9449598a3de66cf Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Thu, 26 Sep 2024 17:33:21 +0530 Subject: [PATCH 73/79] chore: remove slack connect and call help (#6044) * chore: remove slack connect and call help --- frontend/src/pages/Support/Support.tsx | 36 -------------------------- 1 file changed, 36 deletions(-) diff --git a/frontend/src/pages/Support/Support.tsx b/frontend/src/pages/Support/Support.tsx index 9d3d8fff8f..668ac34143 100644 --- a/frontend/src/pages/Support/Support.tsx +++ b/frontend/src/pages/Support/Support.tsx @@ -10,8 +10,6 @@ import useLicense from 'hooks/useLicense'; import { useNotifications } from 'hooks/useNotifications'; import { Book, - Cable, - Calendar, CreditCard, Github, MessageSquare, @@ -78,22 +76,6 @@ const supportChannels = [ url: '', btnText: 'Launch chat', }, - { - key: 'schedule_call', - name: 'Schedule a call', - icon: , - title: 'Schedule a call with the founders.', - url: 'https://calendly.com/vishal-signoz/30min', - btnText: 'Schedule call', - }, - { - key: 'slack_connect', - name: 'Slack Connect', - icon: , - title: 'Get a dedicated support channel for your team.', - url: '', - btnText: 'Request Slack connect', - }, ]; export default function Support(): JSX.Element { @@ -122,20 +104,6 @@ export default function Support(): JSX.Element { // eslint-disable-next-line react-hooks/exhaustive-deps }, []); - const handleSlackConnectRequest = (): void => { - const recipient = 'support@signoz.io'; - const subject = 'Slack Connect Request'; - const body = `I'd like to request a dedicated Slack Connect channel for me and my team. Users (emails) to include besides mine:`; - - // Create the mailto link - const mailtoLink = `mailto:${recipient}?subject=${encodeURIComponent( - subject, - )}&body=${encodeURIComponent(body)}`; - - // Open the default email client - window.location.href = mailtoLink; - }; - const isPremiumChatSupportEnabled = useFeatureFlags(FeatureKeys.PREMIUM_SUPPORT)?.active || false; @@ -214,15 +182,11 @@ export default function Support(): JSX.Element { case channelsMap.documentation: case channelsMap.github: case channelsMap.slack_community: - case channelsMap.schedule_call: handleChannelWithRedirects(channel.url); break; case channelsMap.chat: handleChat(); break; - case channelsMap.slack_connect: - handleSlackConnectRequest(); - break; default: handleChannelWithRedirects('https://signoz.io/slack'); break; From ef4b70f67b014393483c0adb079a2c732c323d9e Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Thu, 26 Sep 2024 19:06:22 +0530 Subject: [PATCH 74/79] fix: intermittent undefined page in trace details page (#6084) --- .../TracesExplorer/ListView/index.tsx | 21 ++----------------- .../TracesExplorer/ListView/utils.tsx | 18 ++++++++-------- 2 files changed, 11 insertions(+), 28 deletions(-) diff --git a/frontend/src/container/TracesExplorer/ListView/index.tsx b/frontend/src/container/TracesExplorer/ListView/index.tsx index 810ffb8241..c22623772b 100644 --- a/frontend/src/container/TracesExplorer/ListView/index.tsx +++ b/frontend/src/container/TracesExplorer/ListView/index.tsx @@ -14,9 +14,8 @@ import { Pagination } from 'hooks/queryPagination'; import useDragColumns from 'hooks/useDragColumns'; import { getDraggedColumns } from 'hooks/useDragColumns/utils'; import useUrlQueryData from 'hooks/useUrlQueryData'; -import history from 'lib/history'; import { RowData } from 'lib/query/createTableColumnsFromQuery'; -import { HTMLAttributes, memo, useCallback, useMemo } from 'react'; +import { memo, 
useCallback, useMemo } from 'react'; import { useSelector } from 'react-redux'; import { AppState } from 'store/reducers'; import { DataSource } from 'types/common/queryBuilder'; @@ -25,7 +24,7 @@ import { GlobalReducer } from 'types/reducer/globalTime'; import { TracesLoading } from '../TraceLoading/TraceLoading'; import { defaultSelectedColumns, PER_PAGE_OPTIONS } from './configs'; import { Container, ErrorText, tableStyles } from './styles'; -import { getListColumns, getTraceLink, transformDataWithDate } from './utils'; +import { getListColumns, transformDataWithDate } from './utils'; interface ListViewProps { isFilterApplied: boolean; @@ -108,21 +107,6 @@ function ListView({ isFilterApplied }: ListViewProps): JSX.Element { [queryTableData], ); - const handleRow = useCallback( - (record: RowData): HTMLAttributes => ({ - onClick: (event): void => { - event.preventDefault(); - event.stopPropagation(); - if (event.metaKey || event.ctrlKey) { - window.open(getTraceLink(record), '_blank'); - } else { - history.push(getTraceLink(record)); - } - }, - }), - [], - ); - const handleDragColumn = useCallback( (fromIndex: number, toIndex: number) => onDragColumns(columns, fromIndex, toIndex), @@ -169,7 +153,6 @@ function ListView({ isFilterApplied }: ListViewProps): JSX.Element { style={tableStyles} dataSource={transformedQueryTableData} columns={columns} - onRow={handleRow} onDragColumn={handleDragColumn} /> )} diff --git a/frontend/src/container/TracesExplorer/ListView/utils.tsx b/frontend/src/container/TracesExplorer/ListView/utils.tsx index a6201436d1..dc0e3808ae 100644 --- a/frontend/src/container/TracesExplorer/ListView/utils.tsx +++ b/frontend/src/container/TracesExplorer/ListView/utils.tsx @@ -47,11 +47,11 @@ export const getListColumns = ( key: 'date', title: 'Timestamp', width: 145, - render: (item): JSX.Element => { + render: (value, item): JSX.Element => { const date = - typeof item === 'string' - ? dayjs(item).format('YYYY-MM-DD HH:mm:ss.SSS') - : dayjs(item / 1e6).format('YYYY-MM-DD HH:mm:ss.SSS'); + typeof value === 'string' + ? 
dayjs(value).format('YYYY-MM-DD HH:mm:ss.SSS') + : dayjs(value / 1e6).format('YYYY-MM-DD HH:mm:ss.SSS'); return ( {date} @@ -67,10 +67,10 @@ export const getListColumns = ( dataIndex: key, key: `${key}-${dataType}-${type}`, width: 145, - render: (value): JSX.Element => { + render: (value, item): JSX.Element => { if (value === '') { return ( - + N/A ); @@ -78,7 +78,7 @@ export const getListColumns = ( if (key === 'httpMethod' || key === 'responseStatusCode') { return ( - + {value} @@ -88,14 +88,14 @@ export const getListColumns = ( if (key === 'durationNano') { return ( - + {getMs(value)}ms ); } return ( - + {value} ); From 44a3469b9b974f188019ba4c37d2966452171371 Mon Sep 17 00:00:00 2001 From: Sudeep MP Date: Thu, 26 Sep 2024 16:14:34 +0100 Subject: [PATCH 75/79] =?UTF-8?q?style:=20enhance=20FAQ=20container=20styl?= =?UTF-8?q?ing=20and=20adjust=20layout=20for=20customer=20s=E2=80=A6=20(#5?= =?UTF-8?q?999)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * style: enhance FAQ container styling and adjust layout for customer stories fixed the button casing faq layout shift issue due to parent flex centered * style: add dark theme support to WorkspaceLocked styles * refactor: moved inline styles to class --- .../WorkspaceLocked.styles.scss | 29 +++++++++++++++++++ .../pages/WorkspaceLocked/WorkspaceLocked.tsx | 28 +++++++++++++----- 2 files changed, 50 insertions(+), 7 deletions(-) diff --git a/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.styles.scss b/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.styles.scss index 131601bfb0..d2317c8c68 100644 --- a/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.styles.scss +++ b/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.styles.scss @@ -1,4 +1,5 @@ $light-theme: 'lightMode'; +$dark-theme: 'darkMode'; @keyframes gradientFlow { 0% { @@ -147,6 +148,34 @@ $light-theme: 'lightMode'; animation: gradientFlow 24s ease infinite; margin-bottom: 18px; } + + &__faq-container { + width: 100%; + + .ant-collapse, + .ant-collapse-item, + .ant-collapse-content-active { + .#{$dark-theme} & { + border-color: var(--bg-slate-400); + } + } + } + + &__customer-stories { + &__left-container, + &__right-container { + display: flex; + flex-direction: column; + } + + &__left-container { + align-items: flex-end; + } + + &__right-container { + align-items: flex-start; + } + } } .contact-us { diff --git a/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.tsx b/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.tsx index 84d977ae81..052d8dcbc5 100644 --- a/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.tsx +++ b/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.tsx @@ -187,8 +187,18 @@ export default function WorkspaceBlocked(): JSX.Element { children: ( {/* #FIXME: please suggest if there is any better way to loop in different columns to get the masonry layout */} -
{renderCustomerStories((index) => index % 2 === 0)} - {renderCustomerStories((index) => index % 2 !== 0)} + + {renderCustomerStories((index) => index % 2 === 0)} + + + {renderCustomerStories((index) => index % 2 !== 0)} + {isAdmin && ( @@ -218,8 +228,12 @@ export default function WorkspaceBlocked(): JSX.Element { label: t('faqs'), children: ( - - + + {isAdmin && ( @@ -340,9 +354,9 @@ export default function WorkspaceBlocked(): JSX.Element { )} - +
- +
)} From 9b42326f80d7e92a1486e7829524f5e2735b88af Mon Sep 17 00:00:00 2001 From: Sudeep MP Date: Thu, 26 Sep 2024 19:00:58 +0100 Subject: [PATCH 76/79] chore(trial end): analytics events added (#6048) --- .../WorkspaceLocked/CustomerStoryCard.tsx | 1 + .../pages/WorkspaceLocked/WorkspaceLocked.tsx | 36 ++++++++++++++++--- .../WorkspaceLocked/workspaceLocked.data.ts | 22 ++++++------ 3 files changed, 43 insertions(+), 16 deletions(-) diff --git a/frontend/src/pages/WorkspaceLocked/CustomerStoryCard.tsx b/frontend/src/pages/WorkspaceLocked/CustomerStoryCard.tsx index c22401f7c4..68df0f5d7c 100644 --- a/frontend/src/pages/WorkspaceLocked/CustomerStoryCard.tsx +++ b/frontend/src/pages/WorkspaceLocked/CustomerStoryCard.tsx @@ -32,4 +32,5 @@ function CustomerStoryCard({ ); } + export default CustomerStoryCard; diff --git a/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.tsx b/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.tsx index 052d8dcbc5..7b6b5af294 100644 --- a/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.tsx +++ b/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.tsx @@ -54,6 +54,23 @@ export default function WorkspaceBlocked(): JSX.Element { data: licensesData, } = useLicense(); + useEffect((): void => { + logEvent('Trial: Blocker Screen Viewed', {}); + }, []); + + const handleContactUsClick = (): void => { + logEvent('Trial: Contact Us CTA Clicked', {}); + }; + + const handleTabClick = (key: string): void => { + logEvent('Trial: Blocked Screen Tabs Clicked', { tabKey: key }); + }; + + const handleCollapseChange = (key: string | string[]): void => { + const lastKey = Array.isArray(key) ? key.slice(-1)[0] : key; + logEvent('Trial: Blocker Screen Tab FAQ Item Clicked', { panelKey: lastKey }); + }; + useEffect(() => { if (!isFetchingLicenseData) { const shouldBlockWorkspace = licensesData?.payload?.workSpaceBlock; @@ -135,7 +152,7 @@ export default function WorkspaceBlocked(): JSX.Element { const tabItems: TabsProps['items'] = [ { - key: '1', + key: 'whyChooseSignoz', label: t('whyChooseSignoz'), children: ( @@ -182,7 +199,7 @@ export default function WorkspaceBlocked(): JSX.Element { ), }, { - key: '2', + key: 'youAreInGoodCompany', label: t('youAreInGoodCompany'), children: ( @@ -224,7 +241,7 @@ export default function WorkspaceBlocked(): JSX.Element { // children: 'Our Pricing', // }, { - key: '4', + key: 'faqs', label: t('faqs'), children: ( @@ -234,7 +251,11 @@ export default function WorkspaceBlocked(): JSX.Element { direction="vertical" className="workspace-locked__faq-container" > - + {isAdmin && ( @@ -355,7 +377,11 @@ export default function WorkspaceBlocked(): JSX.Element { )}
- +
)} diff --git a/frontend/src/pages/WorkspaceLocked/workspaceLocked.data.ts b/frontend/src/pages/WorkspaceLocked/workspaceLocked.data.ts index 0f4d07b96e..bebcdaf64a 100644 --- a/frontend/src/pages/WorkspaceLocked/workspaceLocked.data.ts +++ b/frontend/src/pages/WorkspaceLocked/workspaceLocked.data.ts @@ -42,7 +42,7 @@ export const enterpriseGradeValuesData = [ export const customerStoriesData = [ { - key: 'c-story-1', + key: 'story-subomi-oluwalana', avatar: 'https://signoz.io/img/users/subomi-oluwalana.webp', personName: 'Subomi Oluwalana', role: 'Founder & CEO at Convoy', @@ -53,7 +53,7 @@ export const customerStoriesData = [ 'https://www.linkedin.com/feed/update/urn:li:activity:7212117589068591105/', }, { - key: 'c-story-2', + key: 'story-dhruv-garg', avatar: 'https://signoz.io/img/users/dhruv-garg.webp', personName: 'Dhruv Garg', role: 'Tech Lead at Nudge', @@ -64,7 +64,7 @@ export const customerStoriesData = [ 'https://www.linkedin.com/posts/dhruv-garg79_signoz-docker-kubernetes-activity-7205163679028240384-Otlb/', }, { - key: 'c-story-3', + key: 'story-vivek-bhakta', avatar: 'https://signoz.io/img/users/vivek-bhakta.webp', personName: 'Vivek Bhakta', role: 'CTO at Wombo AI', @@ -74,7 +74,7 @@ export const customerStoriesData = [ link: 'https://x.com/notorious_VB/status/1701773119696904242', }, { - key: 'c-story-4', + key: 'story-pranay-narang', avatar: 'https://signoz.io/img/users/pranay-narang.webp', personName: 'Pranay Narang', role: 'Engineering at Azodha', @@ -84,7 +84,7 @@ export const customerStoriesData = [ link: 'https://x.com/PranayNarang/status/1676247073396752387', }, { - key: 'c-story-4', + key: 'story-Sheheryar-Sewani', avatar: 'https://signoz.io/img/users/shey.webp', personName: 'Sheheryar Sewani', role: 'Seasoned Rails Dev & Founder', @@ -95,7 +95,7 @@ export const customerStoriesData = [ 'https://www.linkedin.com/feed/update/urn:li:activity:7181011853915926528/', }, { - key: 'c-story-5', + key: 'story-daniel-schell', avatar: 'https://signoz.io/img/users/daniel.webp', personName: 'Daniel Schell', role: 'Founder & CTO at Airlockdigital', @@ -115,7 +115,7 @@ export const customerStoriesData = [ link: 'https://x.com/gofrendiasgard/status/1680139003658641408', }, { - key: 'c-story-7', + key: 'story-anselm-eickhoff', avatar: 'https://signoz.io/img/users/anselm.jpg', personName: 'Anselm Eickhoff', role: 'Software Architect', @@ -129,26 +129,26 @@ export const customerStoriesData = [ export const faqData = [ { - key: '1', + key: 'signoz-cloud-vs-community', label: 'What is the difference between SigNoz Cloud(Teams) and Community Edition?', children: 'You can self-host and manage the community edition yourself. You should choose SigNoz Cloud if you don’t want to worry about managing the SigNoz cluster. There are some exclusive features like SSO & SAML support, which come with SigNoz cloud offering. Our team also offers support on the initial configuration of dashboards & alerts and advises on best practices for setting up your observability stack in the SigNoz cloud offering.', }, { - key: '2', + key: 'calc-for-metrics', label: 'How are number of samples calculated for metrics pricing?', children: "If a timeseries sends data every 30s, then it will generate 2 samples per min. So, if you have 10,000 time series sending data every 30s then you will be sending 20,000 samples per min to SigNoz. This will be around 864 mn samples per month and would cost 86.4 USD/month. 
Here's an explainer video on how metrics pricing is calculated - Link: https://vimeo.com/973012522",
 	},
 	{
-		key: '3',
+		key: 'enterprise-support-plans',
 		label: 'Do you offer enterprise support plans?',
 		children:
 			'Yes, feel free to reach out to us on hello@signoz.io if you need a dedicated support plan or paid support for setting up your initial SigNoz setup.',
 	},
 	{
-		key: '4',
+		key: 'who-should-use-enterprise-plans',
 		label: 'Who should use Enterprise plans?',
 		children:
 			'Teams which need enterprise support or features like SSO, Audit logs, etc. may find our enterprise plans valuable.',

From 88ace79a644a12a3b32684c524ec81eca1cb137f Mon Sep 17 00:00:00 2001
From: rahulkeswani101
Date: Fri, 27 Sep 2024 00:22:42 +0530
Subject: [PATCH 77/79] feat: added meta tag to prevent page indexing (#5793)

* feat: added meta tag to prevent page indexing

* chore: revert to previous version

---------

Co-authored-by: Pranay Prateek
---
 frontend/src/index.html.ejs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/frontend/src/index.html.ejs b/frontend/src/index.html.ejs
index f77e50f2b2..d08079587b 100644
--- a/frontend/src/index.html.ejs
+++ b/frontend/src/index.html.ejs
@@ -49,6 +49,7 @@
 		/>
 
+		<meta name="robots" content="noindex,nofollow" />

From e4d1452f5ff63483b95c913cae37442b286e9aae Mon Sep 17 00:00:00 2001
From: Raj Kamal Singh <1133322+raj-k-singh@users.noreply.github.com>
Date: Fri, 27 Sep 2024 10:40:02 +0530
Subject: [PATCH 78/79] Feat: use new logspipelineprocessor for generating logs pipeline collector conf (#6080)

* chore: update logs pipeline prefix for generated collector config

* chore: some cleanup

* chore: some more cleanup

* chore: some more cleanup
---
 .../logparsingpipeline/collector_config.go    | 67 +++++++++++--------
 .../collector_config_test.go                  |  4 +-
 .../app/logparsingpipeline/preview.go         |  4 +-
 pkg/query-service/constants/constants.go      |  4 +-
 4 files changed, 46 insertions(+), 33 deletions(-)

diff --git a/pkg/query-service/app/logparsingpipeline/collector_config.go b/pkg/query-service/app/logparsingpipeline/collector_config.go
index 49f697fbd3..4b8da7b18a 100644
--- a/pkg/query-service/app/logparsingpipeline/collector_config.go
+++ b/pkg/query-service/app/logparsingpipeline/collector_config.go
@@ -19,24 +19,28 @@ var lockLogsPipelineSpec sync.RWMutex
 
 // check if the processors already exist
 // if yes, update the processor.
 // if something doesn't exist, remove it.
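 // Only processors whose names carry the SigNoz log pipeline prefix are
 // added, updated, or removed here; other processors in the collector
 // conf are left untouched.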
-func buildLogParsingProcessors(agentConf, parsingProcessors map[string]interface{}) error { +func updateProcessorConfigsInCollectorConf( + collectorConf map[string]interface{}, + signozPipelineProcessors map[string]interface{}, +) error { agentProcessors := map[string]interface{}{} - if agentConf["processors"] != nil { - agentProcessors = (agentConf["processors"]).(map[string]interface{}) + if collectorConf["processors"] != nil { + agentProcessors = (collectorConf["processors"]).(map[string]interface{}) } exists := map[string]struct{}{} - for key, params := range parsingProcessors { + for key, params := range signozPipelineProcessors { agentProcessors[key] = params exists[key] = struct{}{} } - // remove the old unwanted processors + // remove the old unwanted pipeline processors for k := range agentProcessors { - if _, ok := exists[k]; !ok && strings.HasPrefix(k, constants.LogsPPLPfx) { + _, isInDesiredPipelineProcs := exists[k] + if hasSignozPipelineProcessorPrefix(k) && !isInDesiredPipelineProcs { delete(agentProcessors, k) } } - agentConf["processors"] = agentProcessors + collectorConf["processors"] = agentProcessors return nil } @@ -65,21 +69,24 @@ func getOtelPipelineFromConfig(config map[string]interface{}) (*otelPipeline, er return &p, nil } -func buildLogsProcessors(current []string, logsParserPipeline []string) ([]string, error) { +func buildCollectorPipelineProcessorsList( + currentCollectorProcessors []string, + signozPipelineProcessorNames []string, +) ([]string, error) { lockLogsPipelineSpec.Lock() defer lockLogsPipelineSpec.Unlock() exists := map[string]struct{}{} - for _, v := range logsParserPipeline { + for _, v := range signozPipelineProcessorNames { exists[v] = struct{}{} } // removed the old processors which are not used var pipeline []string - for _, v := range current { - k := v - if _, ok := exists[k]; ok || !strings.HasPrefix(k, constants.LogsPPLPfx) { - pipeline = append(pipeline, v) + for _, procName := range currentCollectorProcessors { + _, isInDesiredPipelineProcs := exists[procName] + if isInDesiredPipelineProcs || !hasSignozPipelineProcessorPrefix(procName) { + pipeline = append(pipeline, procName) } } @@ -96,7 +103,7 @@ func buildLogsProcessors(current []string, logsParserPipeline []string) ([]strin existingVsSpec := map[int]int{} // go through plan and map its elements to current positions in effective config - for i, m := range logsParserPipeline { + for i, m := range signozPipelineProcessorNames { if loc, ok := existing[m]; ok { specVsExistingMap[i] = loc existingVsSpec[loc] = i @@ -106,11 +113,11 @@ func buildLogsProcessors(current []string, logsParserPipeline []string) ([]strin lastMatched := 0 newPipeline := []string{} - for i := 0; i < len(logsParserPipeline); i++ { - m := logsParserPipeline[i] + for i := 0; i < len(signozPipelineProcessorNames); i++ { + m := signozPipelineProcessorNames[i] if loc, ok := specVsExistingMap[i]; ok { for j := lastMatched; j < loc; j++ { - if strings.HasPrefix(pipeline[j], constants.LogsPPLPfx) { + if hasSignozPipelineProcessorPrefix(pipeline[j]) { delete(specVsExistingMap, existingVsSpec[j]) } else { newPipeline = append(newPipeline, pipeline[j]) @@ -159,13 +166,13 @@ func GenerateCollectorConfigWithPipelines( config []byte, pipelines []Pipeline, ) ([]byte, *coreModel.ApiError) { - var c map[string]interface{} - err := yaml.Unmarshal([]byte(config), &c) + var collectorConf map[string]interface{} + err := yaml.Unmarshal([]byte(config), &collectorConf) if err != nil { return nil, coreModel.BadRequest(err) } - processors, 
procNames, err := PreparePipelineProcessor(pipelines) + signozPipelineProcessors, signozPipelineProcNames, err := PreparePipelineProcessor(pipelines) if err != nil { return nil, coreModel.BadRequest(errors.Wrap( err, "could not prepare otel collector processors for log pipelines", @@ -174,8 +181,8 @@ func GenerateCollectorConfigWithPipelines( // Escape any `$`s as `$$` in config generated for pipelines, to ensure any occurrences // like $data do not end up being treated as env vars when loading collector config. - for _, procName := range procNames { - procConf := processors[procName] + for _, procName := range signozPipelineProcNames { + procConf := signozPipelineProcessors[procName] serializedProcConf, err := yaml.Marshal(procConf) if err != nil { return nil, coreModel.InternalError(fmt.Errorf( @@ -194,14 +201,14 @@ func GenerateCollectorConfigWithPipelines( )) } - processors[procName] = escapedConf + signozPipelineProcessors[procName] = escapedConf } // Add processors to unmarshaled collector config `c` - buildLogParsingProcessors(c, processors) + updateProcessorConfigsInCollectorConf(collectorConf, signozPipelineProcessors) // build the new processor list in service.pipelines.logs - p, err := getOtelPipelineFromConfig(c) + p, err := getOtelPipelineFromConfig(collectorConf) if err != nil { return nil, coreModel.BadRequest(err) } @@ -211,16 +218,20 @@ func GenerateCollectorConfigWithPipelines( )) } - updatedProcessorList, _ := buildLogsProcessors(p.Pipelines.Logs.Processors, procNames) + updatedProcessorList, _ := buildCollectorPipelineProcessorsList(p.Pipelines.Logs.Processors, signozPipelineProcNames) p.Pipelines.Logs.Processors = updatedProcessorList // add the new processor to the data ( no checks required as the keys will exists) - c["service"].(map[string]interface{})["pipelines"].(map[string]interface{})["logs"] = p.Pipelines.Logs + collectorConf["service"].(map[string]interface{})["pipelines"].(map[string]interface{})["logs"] = p.Pipelines.Logs - updatedConf, err := yaml.Marshal(c) + updatedConf, err := yaml.Marshal(collectorConf) if err != nil { return nil, coreModel.BadRequest(err) } return updatedConf, nil } + +func hasSignozPipelineProcessorPrefix(procName string) bool { + return strings.HasPrefix(procName, constants.LogsPPLPfx) || strings.HasPrefix(procName, constants.OldLogsPPLPfx) +} diff --git a/pkg/query-service/app/logparsingpipeline/collector_config_test.go b/pkg/query-service/app/logparsingpipeline/collector_config_test.go index f5ba7a352b..2f2d898416 100644 --- a/pkg/query-service/app/logparsingpipeline/collector_config_test.go +++ b/pkg/query-service/app/logparsingpipeline/collector_config_test.go @@ -94,7 +94,7 @@ var buildProcessorTestData = []struct { func TestBuildLogParsingProcessors(t *testing.T) { for _, test := range buildProcessorTestData { Convey(test.Name, t, func() { - err := buildLogParsingProcessors(test.agentConf, test.pipelineProcessor) + err := updateProcessorConfigsInCollectorConf(test.agentConf, test.pipelineProcessor) So(err, ShouldBeNil) So(test.agentConf, ShouldResemble, test.outputConf) }) @@ -200,7 +200,7 @@ var BuildLogsPipelineTestData = []struct { func TestBuildLogsPipeline(t *testing.T) { for _, test := range BuildLogsPipelineTestData { Convey(test.Name, t, func() { - v, err := buildLogsProcessors(test.currentPipeline, test.logsPipeline) + v, err := buildCollectorPipelineProcessorsList(test.currentPipeline, test.logsPipeline) So(err, ShouldBeNil) fmt.Println(test.Name, "\n", test.currentPipeline, "\n", v, "\n", test.expectedPipeline) 
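 			// The merged list should preserve relative ordering: untouched
 			// processors stay in place while SigNoz pipeline processors are
 			// slotted in around them in their configured order.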
 			So(v, ShouldResemble, test.expectedPipeline)
 		})
 	}
 }
diff --git a/pkg/query-service/app/logparsingpipeline/preview.go b/pkg/query-service/app/logparsingpipeline/preview.go
index b37295eb96..548c1ee2f5 100644
--- a/pkg/query-service/app/logparsingpipeline/preview.go
+++ b/pkg/query-service/app/logparsingpipeline/preview.go
@@ -7,7 +7,7 @@ import (
 	"time"
 
 	_ "github.com/SigNoz/signoz-otel-collector/pkg/parser/grok"
-	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor"
+	"github.com/SigNoz/signoz-otel-collector/processor/signozlogspipelineprocessor"
 	"github.com/pkg/errors"
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/plog"
@@ -42,7 +42,7 @@ func SimulatePipelinesProcessing(
 	simulatorInputPLogs := SignozLogsToPLogs(logs)
 
 	processorFactories, err := processor.MakeFactoryMap(
-		logstransformprocessor.NewFactory(),
+		signozlogspipelineprocessor.NewFactory(),
 	)
 	if err != nil {
 		return nil, nil, model.InternalError(errors.Wrap(
diff --git a/pkg/query-service/constants/constants.go b/pkg/query-service/constants/constants.go
index 78ee31e1a1..541d46d5ca 100644
--- a/pkg/query-service/constants/constants.go
+++ b/pkg/query-service/constants/constants.go
@@ -341,7 +341,9 @@ var ReservedColumnTargetAliases = map[string]struct{}{
 }
 
 // logsPPLPfx is a short constant for logsPipelinePrefix
-const LogsPPLPfx = "logstransform/pipeline_"
+// TODO(Raj): Remove old prefix after new processor based pipelines have been rolled out
+const LogsPPLPfx = "signozlogspipeline/pipeline_"
+const OldLogsPPLPfx = "logstransform/pipeline_"
 
 const IntegrationPipelineIdPrefix = "integration"
 
From 717545e14c06195e497411314b1f80a938f87867 Mon Sep 17 00:00:00 2001
From: Yunus M
Date: Fri, 27 Sep 2024 12:08:25 +0530
Subject: [PATCH 79/79] feat: remove sidebar dock option (#6083)

* feat: remove sidebar dock option

* feat: remove all references of sidebar collapse
---
 frontend/src/constants/app.ts                 |  1 -
 .../constants/shortcuts/globalShortcuts.ts    |  6 --
 .../container/AppLayout/AppLayout.styles.scss |  6 --
 frontend/src/container/AppLayout/index.tsx    | 62 ++--------------
 .../src/container/SideNav/SideNav.styles.scss | 37 ----------
 frontend/src/container/SideNav/SideNav.tsx    | 37 ++--------
 .../TracesExplorer/TracesExplorer.styles.scss |  1 +
 frontend/src/store/actions/app/index.ts       |  1 -
 .../src/store/actions/app/sideBarCollapse.ts  | 16 -----
 frontend/src/store/actions/index.ts           |  1 -
 frontend/src/store/reducers/app.ts            | 10 ---
 frontend/src/types/actions/app.ts             |  6 --
 frontend/src/types/reducer/app.ts             |  1 -
 frontend/tests/auth.json                      | 70 +++++++++----------
 14 files changed, 43 insertions(+), 212 deletions(-)
 delete mode 100644 frontend/src/store/actions/app/index.ts
 delete mode 100644 frontend/src/store/actions/app/sideBarCollapse.ts

diff --git a/frontend/src/constants/app.ts b/frontend/src/constants/app.ts
index d260806856..c82e2a34e8 100644
--- a/frontend/src/constants/app.ts
+++ b/frontend/src/constants/app.ts
@@ -1,8 
+1,4 @@ -import { getUserOperatingSystem, UserOperatingSystem } from 'utils/getUserOS'; - -const userOS = getUserOperatingSystem(); export const GlobalShortcuts = { - SidebarCollapse: '\\+meta', NavigateToServices: 's+shift', NavigateToTraces: 't+shift', NavigateToLogs: 'l+shift', @@ -13,7 +9,6 @@ export const GlobalShortcuts = { }; export const GlobalShortcutsName = { - SidebarCollapse: `${userOS === UserOperatingSystem.MACOS ? 'cmd' : 'ctrl'}+\\`, NavigateToServices: 'shift+s', NavigateToTraces: 'shift+t', NavigateToLogs: 'shift+l', @@ -24,7 +19,6 @@ export const GlobalShortcutsName = { }; export const GlobalShortcutsDescription = { - SidebarCollapse: 'Collpase the sidebar', NavigateToServices: 'Navigate to Services page', NavigateToTraces: 'Navigate to Traces page', NavigateToLogs: 'Navigate to logs page', diff --git a/frontend/src/container/AppLayout/AppLayout.styles.scss b/frontend/src/container/AppLayout/AppLayout.styles.scss index 2ae1531c79..a991f08351 100644 --- a/frontend/src/container/AppLayout/AppLayout.styles.scss +++ b/frontend/src/container/AppLayout/AppLayout.styles.scss @@ -16,12 +16,6 @@ width: 100%; } } - - &.docked { - .app-content { - width: calc(100% - 240px); - } - } } .chat-support-gateway { diff --git a/frontend/src/container/AppLayout/index.tsx b/frontend/src/container/AppLayout/index.tsx index 4cf2e0f5bb..4f5a3c43d7 100644 --- a/frontend/src/container/AppLayout/index.tsx +++ b/frontend/src/container/AppLayout/index.tsx @@ -5,13 +5,11 @@ import './AppLayout.styles.scss'; import * as Sentry from '@sentry/react'; import { Flex } from 'antd'; -import getLocalStorageKey from 'api/browser/localstorage/get'; import getUserLatestVersion from 'api/user/getLatestVersion'; import getUserVersion from 'api/user/getVersion'; import cx from 'classnames'; import ChatSupportGateway from 'components/ChatSupportGateway/ChatSupportGateway'; import OverlayScrollbar from 'components/OverlayScrollbar/OverlayScrollbar'; -import { IS_SIDEBAR_COLLAPSED } from 'constants/app'; import { FeatureKeys } from 'constants/features'; import ROUTES from 'constants/routes'; import SideNav from 'container/SideNav'; @@ -22,22 +20,13 @@ import useLicense from 'hooks/useLicense'; import { useNotifications } from 'hooks/useNotifications'; import history from 'lib/history'; import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback'; -import { - ReactNode, - useCallback, - useEffect, - useLayoutEffect, - useMemo, - useRef, - useState, -} from 'react'; +import { ReactNode, useEffect, useMemo, useRef, useState } from 'react'; import { Helmet } from 'react-helmet-async'; import { useTranslation } from 'react-i18next'; import { useQueries } from 'react-query'; import { useDispatch, useSelector } from 'react-redux'; import { useLocation } from 'react-router-dom'; import { Dispatch } from 'redux'; -import { sideBarCollapse } from 'store/actions'; import { AppState } from 'store/reducers'; import AppActions from 'types/actions'; import { @@ -59,10 +48,6 @@ function AppLayout(props: AppLayoutProps): JSX.Element { (state) => state.app, ); - const [collapsed, setCollapsed] = useState( - getLocalStorageKey(IS_SIDEBAR_COLLAPSED) === 'true', - ); - const { notifications } = useNotifications(); const isDarkMode = useIsDarkMode(); @@ -117,14 +102,6 @@ function AppLayout(props: AppLayoutProps): JSX.Element { const latestCurrentCounter = useRef(0); const latestVersionCounter = useRef(0); - const onCollapse = useCallback(() => { - setCollapsed((collapsed) => !collapsed); - }, []); - - 
useLayoutEffect(() => { - dispatch(sideBarCollapse(collapsed)); - }, [collapsed, dispatch]); - useEffect(() => { if ( getUserLatestVersionResponse.isFetched && @@ -279,23 +256,8 @@ function AppLayout(props: AppLayoutProps): JSX.Element { } }, [isDarkMode]); - const isSideNavCollapsed = getLocalStorageKey(IS_SIDEBAR_COLLAPSED); - - /** - * Note: Right now we don't have a page-level method to pass the sidebar collapse state. - * Since the use case for overriding is not widely needed, we are setting it here - * so that the workspace locked page will have an expanded sidebar regardless of how users - * have set it or what is stored in localStorage. This will not affect the localStorage config. - */ - const isWorkspaceLocked = pathname === ROUTES.WORKSPACE_LOCKED; - return ( - + {pageTitle} @@ -321,25 +283,11 @@ function AppLayout(props: AppLayoutProps): JSX.Element { )} - + {isToDisplayLayout && !renderFullScreen && ( - + )} -
+
}> diff --git a/frontend/src/container/SideNav/SideNav.styles.scss b/frontend/src/container/SideNav/SideNav.styles.scss index 2f5167f104..1a148e2469 100644 --- a/frontend/src/container/SideNav/SideNav.styles.scss +++ b/frontend/src/container/SideNav/SideNav.styles.scss @@ -3,10 +3,6 @@ height: 100%; position: relative; z-index: 1; - - &.docked { - width: 240px; - } } .sideNav { @@ -229,39 +225,6 @@ display: block; } } - - &.docked { - flex: 0 0 240px; - max-width: 240px; - min-width: 240px; - width: 240px; - - .secondary-nav-items { - width: 240px; - } - - .brand { - justify-content: space-between; - } - - .get-started-nav-items { - .get-started-btn { - justify-content: flex-start; - } - } - - .collapse-expand-handlers { - display: block; - } - - .nav-item-label { - display: block; - } - - .nav-item-beta { - display: block; - } - } } .lightMode { diff --git a/frontend/src/container/SideNav/SideNav.tsx b/frontend/src/container/SideNav/SideNav.tsx index 1ba863d8ec..16787bc3d8 100644 --- a/frontend/src/container/SideNav/SideNav.tsx +++ b/frontend/src/container/SideNav/SideNav.tsx @@ -3,7 +3,7 @@ import './SideNav.styles.scss'; import { Color } from '@signozhq/design-tokens'; -import { Button, Tooltip } from 'antd'; +import { Button } from 'antd'; import logEvent from 'api/common/logEvent'; import cx from 'classnames'; import { FeatureKeys } from 'constants/features'; @@ -16,9 +16,6 @@ import history from 'lib/history'; import { AlertTriangle, CheckSquare, - ChevronLeftCircle, - ChevronRightCircle, - PanelRight, RocketIcon, UserCircle, } from 'lucide-react'; @@ -55,13 +52,9 @@ interface UserManagementMenuItems { function SideNav({ licenseData, isFetching, - onCollapse, - collapsed, }: { licenseData: any; isFetching: boolean; - onCollapse: () => void; - collapsed: boolean; }): JSX.Element { const [menuItems, setMenuItems] = useState(defaultMenuItems); @@ -330,8 +323,6 @@ function SideNav({ }; useEffect(() => { - registerShortcut(GlobalShortcuts.SidebarCollapse, onCollapse); - registerShortcut(GlobalShortcuts.NavigateToServices, () => onClickHandler(ROUTES.APPLICATION, null), ); @@ -359,7 +350,6 @@ function SideNav({ ); return (): void => { - deregisterShortcut(GlobalShortcuts.SidebarCollapse); deregisterShortcut(GlobalShortcuts.NavigateToServices); deregisterShortcut(GlobalShortcuts.NavigateToTraces); deregisterShortcut(GlobalShortcuts.NavigateToLogs); @@ -368,11 +358,11 @@ function SideNav({ deregisterShortcut(GlobalShortcuts.NavigateToExceptions); deregisterShortcut(GlobalShortcuts.NavigateToMessagingQueues); }; - }, [deregisterShortcut, onClickHandler, onCollapse, registerShortcut]); + }, [deregisterShortcut, onClickHandler, registerShortcut]); return ( -
-
+
+
{licenseTag}
)}
- - -
{isCloudUserVal && ( @@ -504,14 +483,6 @@ function SideNav({ }} /> )} - -
- {collapsed ? ( - - ) : ( - - )} -
diff --git a/frontend/src/pages/TracesExplorer/TracesExplorer.styles.scss b/frontend/src/pages/TracesExplorer/TracesExplorer.styles.scss index cf6eb52b10..4323f901d5 100644 --- a/frontend/src/pages/TracesExplorer/TracesExplorer.styles.scss +++ b/frontend/src/pages/TracesExplorer/TracesExplorer.styles.scss @@ -1,6 +1,7 @@ .trace-explorer-header { display: flex; justify-content: space-between; + align-items: center; .trace-explorer-run-query { display: flex; diff --git a/frontend/src/store/actions/app/index.ts b/frontend/src/store/actions/app/index.ts deleted file mode 100644 index cb2a26f9e6..0000000000 --- a/frontend/src/store/actions/app/index.ts +++ /dev/null @@ -1 +0,0 @@ -export * from './sideBarCollapse'; diff --git a/frontend/src/store/actions/app/sideBarCollapse.ts b/frontend/src/store/actions/app/sideBarCollapse.ts deleted file mode 100644 index c99d79b6fb..0000000000 --- a/frontend/src/store/actions/app/sideBarCollapse.ts +++ /dev/null @@ -1,16 +0,0 @@ -import setLocalStorageKey from 'api/browser/localstorage/set'; -import { IS_SIDEBAR_COLLAPSED } from 'constants/app'; -import { Dispatch } from 'redux'; -import AppActions from 'types/actions'; - -export const sideBarCollapse = ( - collapseState: boolean, -): ((dispatch: Dispatch) => void) => { - setLocalStorageKey(IS_SIDEBAR_COLLAPSED, `${collapseState}`); - return (dispatch: Dispatch): void => { - dispatch({ - type: 'SIDEBAR_COLLAPSE', - payload: collapseState, - }); - }; -}; diff --git a/frontend/src/store/actions/index.ts b/frontend/src/store/actions/index.ts index 3074cdb3a2..f50fb7477c 100644 --- a/frontend/src/store/actions/index.ts +++ b/frontend/src/store/actions/index.ts @@ -1,4 +1,3 @@ -export * from './app'; export * from './global'; export * from './metrics'; export * from './serviceMap'; diff --git a/frontend/src/store/reducers/app.ts b/frontend/src/store/reducers/app.ts index 4db3965cad..bdb80d2565 100644 --- a/frontend/src/store/reducers/app.ts +++ b/frontend/src/store/reducers/app.ts @@ -1,11 +1,9 @@ import getLocalStorageKey from 'api/browser/localstorage/get'; -import { IS_SIDEBAR_COLLAPSED } from 'constants/app'; import { LOCALSTORAGE } from 'constants/localStorage'; import { getInitialUserTokenRefreshToken } from 'store/utils'; import { AppAction, LOGGED_IN, - SIDEBAR_COLLAPSE, UPDATE_CONFIGS, UPDATE_CURRENT_ERROR, UPDATE_CURRENT_VERSION, @@ -44,7 +42,6 @@ const getInitialUser = (): User | null => { const InitialValue: InitialValueTypes = { isLoggedIn: getLocalStorageKey(LOCALSTORAGE.IS_LOGGED_IN) === 'true', - isSideBarCollapsed: getLocalStorageKey(IS_SIDEBAR_COLLAPSED) === 'true', currentVersion: '', latestVersion: '', featureResponse: { @@ -76,13 +73,6 @@ const appReducer = ( }; } - case SIDEBAR_COLLAPSE: { - return { - ...state, - isSideBarCollapsed: action.payload, - }; - } - case UPDATE_FEATURE_FLAG_RESPONSE: { return { ...state, diff --git a/frontend/src/types/actions/app.ts b/frontend/src/types/actions/app.ts index 78a5da72ad..54b1992af2 100644 --- a/frontend/src/types/actions/app.ts +++ b/frontend/src/types/actions/app.ts @@ -34,11 +34,6 @@ export interface LoggedInUser { }; } -export interface SideBarCollapse { - type: typeof SIDEBAR_COLLAPSE; - payload: boolean; -} - export interface UpdateAppVersion { type: typeof UPDATE_CURRENT_VERSION; payload: { @@ -137,7 +132,6 @@ export interface UpdateFeatureFlag { export type AppAction = | LoggedInUser - | SideBarCollapse | UpdateAppVersion | UpdateLatestVersion | UpdateVersionError diff --git a/frontend/src/types/reducer/app.ts 
b/frontend/src/types/reducer/app.ts index 545cff7156..c51defcfb0 100644 --- a/frontend/src/types/reducer/app.ts +++ b/frontend/src/types/reducer/app.ts @@ -17,7 +17,6 @@ export interface User { export default interface AppReducer { isLoggedIn: boolean; - isSideBarCollapsed: boolean; currentVersion: string; latestVersion: string; isCurrentVersionError: boolean; diff --git a/frontend/tests/auth.json b/frontend/tests/auth.json index 2dd3d40466..0de47618ab 100644 --- a/frontend/tests/auth.json +++ b/frontend/tests/auth.json @@ -1,38 +1,34 @@ { - "cookies": [], - "origins": [ - { - "origin": "http://localhost:3301", - "localStorage": [ - { - "name": "isSideBarCollapsed", - "value": "false" - }, - { - "name": "metricsTimeDurations", - "value": "{}" - }, - { - "name": "i18nextLng", - "value": "en-US" - }, - { - "name": "reactQueryDevtoolsSortFn", - "value": "\"Status > Last Updated\"" - }, - { - "name": "AUTH_TOKEN", - "value": "authtoken" - }, - { - "name": "IS_LOGGED_IN", - "value": "true" - }, - { - "name": "REFRESH_AUTH_TOKEN", - "value": "refreshJwt" - } - ] - } - ] -} \ No newline at end of file + "cookies": [], + "origins": [ + { + "origin": "http://localhost:3301", + "localStorage": [ + { + "name": "metricsTimeDurations", + "value": "{}" + }, + { + "name": "i18nextLng", + "value": "en-US" + }, + { + "name": "reactQueryDevtoolsSortFn", + "value": "\"Status > Last Updated\"" + }, + { + "name": "AUTH_TOKEN", + "value": "authtoken" + }, + { + "name": "IS_LOGGED_IN", + "value": "true" + }, + { + "name": "REFRESH_AUTH_TOKEN", + "value": "refreshJwt" + } + ] + } + ] +}
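
A note on the logs pipeline prefix change earlier in this series: constants.go now points LogsPPLPfx at the new signozlogspipeline/pipeline_ prefix and retains the old logstransform/pipeline_ value as OldLogsPPLPfx until the new processor based pipelines have been rolled out, per the TODO in that hunk. The sketch below shows one way callers could accept processor names under either prefix during that window. It is a minimal illustration, not part of these patches: only the two constant values come from the diff; the helper name IsLogsPipelineProcessor and its usage are hypothetical.

	package constants

	import "strings"

	// Prefix values copied verbatim from the constants.go hunk above.
	const LogsPPLPfx = "signozlogspipeline/pipeline_"
	const OldLogsPPLPfx = "logstransform/pipeline_"

	// IsLogsPipelineProcessor reports whether a collector processor name was
	// generated for a logs pipeline, under either the new prefix or the
	// pre-rollout one. Hypothetical helper, sketched for illustration only.
	func IsLogsPipelineProcessor(name string) bool {
		return strings.HasPrefix(name, LogsPPLPfx) ||
			strings.HasPrefix(name, OldLogsPPLPfx)
	}

Once the rollout completes and OldLogsPPLPfx is removed, the second HasPrefix check would be dropped along with it.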