diff --git a/cli/go.mod b/cli/go.mod index 1e6cbaad706f..0e5703db7bcd 100644 --- a/cli/go.mod +++ b/cli/go.mod @@ -68,7 +68,7 @@ require ( github.com/openshift/client-go v0.0.0-20210422153130-25c8450d1535 // indirect github.com/paulmach/orb v0.7.1 // indirect github.com/pelletier/go-toml/v2 v2.1.1 // indirect - github.com/pierrec/lz4/v4 v4.1.15 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/common v0.35.0 // indirect diff --git a/cli/go.sum b/cli/go.sum index 08fdd6b537ca..b12114236860 100644 --- a/cli/go.sum +++ b/cli/go.sum @@ -340,8 +340,9 @@ github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOS github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= diff --git a/server/common/module_shared.go b/server/common/module_shared.go index 0c3c8ebc72f3..e03569da4fb0 100644 --- a/server/common/module_shared.go +++ b/server/common/module_shared.go @@ -48,11 +48,11 @@ type Config struct { } type IngesterConfig struct { - Exporters ExportersConfig `yaml:"exporters"` + Exporters []ExportersConfig `yaml:"exporters"` } type ExportersConfig struct { - Enabled bool `yaml:"enabled"` + Protocol string `yaml:"protocol"` } func ExportersEnabled(configPath string) bool { @@ -65,5 +65,7 @@ func ExportersEnabled(configPath string) bool { if err = yaml.Unmarshal(configBytes, &config); err != nil { log.Error("Unmarshal yaml error:", err) } - return config.Ingester.Exporters.Enabled + enabled := len(config.Ingester.Exporters) > 0 + log.Info("exporters enabled: ", enabled) + return enabled } diff --git a/server/go.mod b/server/go.mod index 040f2a8b7289..9639f3c4c08a 100644 --- a/server/go.mod +++ b/server/go.mod @@ -95,6 +95,7 @@ require ( require ( bou.ke/monkey v1.0.2 + github.com/IBM/sarama v1.43.0 github.com/aws/aws-sdk-go-v2/service/eks v1.26.0 github.com/deepflowio/deepflow/server/controller/http/appender v0.0.0-00010101000000-000000000000 github.com/deepflowio/deepflow/server/querier/app/prometheus/router/packet_adapter v0.0.0-00010101000000-000000000000 @@ -103,7 +104,7 @@ require ( github.com/go-redis/redis/v9 v9.0.0-rc.2 github.com/golang/mock v1.6.0 github.com/grafana/pyroscope-go v1.0.4 - github.com/klauspost/compress v1.15.9 + github.com/klauspost/compress v1.17.7 github.com/mitchellh/mapstructure v1.4.3 github.com/orcaman/concurrent-map/v2 v2.0.1 github.com/pyroscope-io/pyroscope v0.37.1 @@ -124,18 +125,27 @@ require ( github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dustin/go-humanize v1.0.0 // 
indirect + github.com/eapache/go-resiliency v1.6.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect + github.com/eapache/queue v1.1.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect - github.com/fortytw2/leaktest v1.3.0 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/golang/glog v1.2.0 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.4 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/ionos-cloud/sdk-go/v6 v6.1.0 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/pyroscope-io/jfr-parser v0.5.2 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect @@ -210,7 +220,7 @@ require ( github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/paulmach/orb v0.7.1 // indirect github.com/pelletier/go-toml/v2 v2.1.1 // indirect - github.com/pierrec/lz4/v4 v4.1.15 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_golang v1.12.2 // indirect diff --git a/server/go.sum b/server/go.sum index 57fcb48c1101..36fd0efaecca 100644 --- a/server/go.sum +++ b/server/go.sum @@ -37,6 +37,8 @@ github.com/ClickHouse/clickhouse-go/v2 v2.1.0 h1:X53a5FzRna9TLGGYm1A7T+3kEnrfEYl github.com/ClickHouse/clickhouse-go/v2 v2.1.0/go.mod h1:nOBMOlMUGQJ2eb6PtECHYldbEHmDJFzfIrtaDXMjrb4= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/IBM/sarama v1.43.0 h1:YFFDn8mMI2QL0wOrG0J2sFoVIAFl7hS9JQi2YZsXtJc= +github.com/IBM/sarama v1.43.0/go.mod h1:zlE6HEbC/SMQ9mhEYaF7nNLYOUyrs0obySKCckWP9BM= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= @@ -190,6 +192,12 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= +github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= +github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= 
+github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= @@ -218,7 +226,6 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= @@ -370,6 +377,8 @@ github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORR github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/pyroscope-go v1.0.4 h1:oyQX0BOkL+iARXzHuCdIF5TQ7/sRSel1YFViMHC7Bm0= @@ -392,6 +401,9 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= @@ -409,6 +421,18 @@ github.com/ionos-cloud/sdk-go/v6 v6.1.0 h1:0EZz5H+t6W23zHt6dgHYkKavr72/30O9nA97E github.com/ionos-cloud/sdk-go/v6 v6.1.0/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 
v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= @@ -440,8 +464,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -555,8 +579,9 @@ github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdU github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -598,6 +623,8 @@ github.com/pyroscope-io/jfr-parser v0.5.2 h1:gQhK9C/eZBAPHmfCI4aL6XNHVM7O27jY2P0 github.com/pyroscope-io/jfr-parser v0.5.2/go.mod h1:ZMcbJjfDkOwElEK8CvUJbpetztRWRXszCmf5WU0erV8= github.com/pyroscope-io/pyroscope v0.37.1 h1:ruVzV27HnhT9RynJxGYCAdBg2z9iPkgCMHj4J3WhSY4= github.com/pyroscope-io/pyroscope v0.37.1/go.mod 
h1:RSC/3Ua7fCA7I1R/vLFDuhpoZxfwRyIARKktrNYnVig= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -737,6 +764,7 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -770,6 +798,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -795,6 +824,7 @@ golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= diff --git a/server/ingester/ckmonitor/monitor.go b/server/ingester/ckmonitor/monitor.go index fe16f7756ca5..266e0bed3950 100644 --- a/server/ingester/ckmonitor/monitor.go +++ b/server/ingester/ckmonitor/monitor.go @@ -144,7 +144,7 @@ func (m *Monitor) isDiskNeedClean(diskInfo *DiskInfo) bool { usage := (diskInfo.usedSpace*100 + diskInfo.totalSpace - 1) / diskInfo.totalSpace if usage > uint64(cleanCfg.UsedPercent) && diskInfo.freeSpace < uint64(cleanCfg.FreeSpace)<<30 { - log.Infof("disk usage is over %d%. disk name: %s, path: %s, total space: %d, free space: %d, usage: %d", + log.Infof("disk usage is over %d%%. 
disk name: %s, path: %s, total space: %d, free space: %d, usage: %d", cleanCfg.UsedPercent, diskInfo.name, diskInfo.path, diskInfo.totalSpace, diskInfo.freeSpace, usage) return true } else if cleanCfg.UsedSpace > 0 && diskInfo.usedSpace >= uint64(cleanCfg.UsedSpace)<<30 { diff --git a/server/ingester/event/common/common.go b/server/ingester/event/common/common.go index 71341ce9b40d..cc837d50979c 100644 --- a/server/ingester/event/common/common.go +++ b/server/ingester/event/common/common.go @@ -17,6 +17,7 @@ package common import ( + "github.com/deepflowio/deepflow/server/ingester/exporters/config" logging "github.com/op/go-logging" ) @@ -59,3 +60,12 @@ func (e EventType) TableName() string { return "unknown_event" } } + +func (e EventType) DataSource() uint32 { + switch e { + case PERF_EVENT: + return uint32(config.PERF_EVENT) + default: + return uint32(config.MAX_DATASOURCE_ID) + } +} diff --git a/server/ingester/event/dbwriter/event.go b/server/ingester/event/dbwriter/event.go index 31f8f3b48a2d..0472056fee66 100644 --- a/server/ingester/event/dbwriter/event.go +++ b/server/ingester/event/dbwriter/event.go @@ -17,14 +17,21 @@ package dbwriter import ( + "fmt" "net" + "reflect" "sync/atomic" + "unsafe" basecommon "github.com/deepflowio/deepflow/server/ingester/common" "github.com/deepflowio/deepflow/server/ingester/event/common" + exportercommon "github.com/deepflowio/deepflow/server/ingester/exporters/common" + "github.com/deepflowio/deepflow/server/ingester/exporters/config" + utag "github.com/deepflowio/deepflow/server/ingester/exporters/universal_tag" "github.com/deepflowio/deepflow/server/ingester/flow_tag" "github.com/deepflowio/deepflow/server/libs/ckdb" "github.com/deepflowio/deepflow/server/libs/pool" + "github.com/deepflowio/deepflow/server/libs/utils" ) const ( @@ -44,57 +51,59 @@ const ( ) type EventStore struct { - Time uint32 // s - _id uint64 + pool.ReferenceCount - StartTime int64 - EndTime int64 + Time uint32 `json:"time" category:"tag" sub:"flow_info"` // s + _id uint64 `json:"_id" category:"tag" sub:"flow_info"` + + StartTime int64 `json:"start_time" category:"tag" sub:"flow_info"` // us + EndTime int64 `json:"end_time" category:"tag" sub:"flow_info"` // us Tagged uint8 - SignalSource uint8 // Resource / File IO - EventType string + SignalSource uint8 `json:"signal_source" category:"tag" sub:"capture_info" enumfile:"perf_event_signal_source"` // Resource / File IO + EventType string `json:"event_type" category:"tag" sub:"event_info" enumfile:"perf_event_type"` EventDescription string - ProcessKName string - - GProcessID uint32 - - RegionID uint16 - AZID uint16 - L3EpcID int32 - HostID uint16 - PodID uint32 - PodNodeID uint32 - PodNSID uint16 - PodClusterID uint16 - PodGroupID uint32 - L3DeviceType uint8 - L3DeviceID uint32 - ServiceID uint32 - VTAPID uint16 - SubnetID uint16 - IsIPv4 bool - IP4 uint32 - IP6 net.IP + ProcessKName string `json:"process_kname" category:"tag" sub:"service_info"` + + GProcessID uint32 `json:"gprocess_id" category:"tag" sub:"universal_tag"` + + RegionID uint16 `json:"region_id" category:"tag" sub:"universal_tag"` + AZID uint16 `json:"az_id" category:"tag" sub:"universal_tag"` + L3EpcID int32 `json:"l3_epc_id" category:"tag" sub:"universal_tag"` + HostID uint16 `json:"host_id" category:"tag" sub:"universal_tag"` + PodID uint32 `json:"pod_id" category:"tag" sub:"universal_tag"` + PodNodeID uint32 `json:"pod_node_id" category:"tag" sub:"universal_tag"` + PodNSID uint16 `json:"pod_ns_id" category:"tag" sub:"universal_tag"` + PodClusterID 
uint16 `json:"pod_cluster_id" category:"tag" sub:"universal_tag"` + PodGroupID uint32 `json:"pod_group_id" category:"tag" sub:"universal_tag"` + L3DeviceType uint8 `json:"l3_device_type" category:"tag" sub:"universal_tag"` + L3DeviceID uint32 `json:"l3_device_id" category:"tag" sub:"universal_tag"` + ServiceID uint32 `json:"service_id" category:"tag" sub:"universal_tag"` + VTAPID uint16 `json:"agent_id" category:"tag" sub:"universal_tag"` + SubnetID uint16 `json:"subnet_id" category:"tag" sub:"universal_tag"` + IsIPv4 bool `json:"is_ipv4" category:"tag" sub:"universal_tag"` + IP4 uint32 `json:"ip4" category:"tag" sub:"network_layer" to_string:"IPv4String"` + IP6 net.IP `json:"ip6" category:"tag" sub:"network_layer" to_string:"IPv6String"` // Not stored, only determines which database to store in. // When OrgId is 0 or 1, it is stored in database 'event', otherwise stored in '_event'. OrgId uint16 TeamID uint16 - AutoInstanceID uint32 - AutoInstanceType uint8 - AutoServiceID uint32 - AutoServiceType uint8 + AutoInstanceID uint32 `json:"auto_instance_id" category:"tag" sub:"universal_tag"` + AutoInstanceType uint8 `json:"auto_instance_type" category:"tag" sub:"universal_tag" enumfile:"auto_instance_type"` + AutoServiceID uint32 `json:"auto_service_id" category:"tag" sub:"universal_tag"` + AutoServiceType uint8 `json:"auto_service_type" category:"tag" sub:"universal_tag" enumfile:"auto_service_type"` - AppInstance string + AppInstance string `json:"app_instance" category:"tag" sub:"service_info"` - AttributeNames []string - AttributeValues []string + AttributeNames []string `json:"attribute_names" category:"tag" sub:"native_tag"` + AttributeValues []string `json:"attribute_values" category:"tag" sub:"native_tag"` HasMetrics bool - Bytes uint32 - Duration uint64 + Bytes uint32 `json:"bytes" category:"metrics" sub:"throughput"` + Duration uint64 `json:"duration" category:"metrics" sub:"delay"` } func (e *EventStore) WriteBlock(block *ckdb.Block) { @@ -166,6 +175,41 @@ func (e *EventStore) Release() { ReleaseEventStore(e) } +func (e *EventStore) DataSource() uint32 { + if e.HasMetrics { + return uint32(config.PERF_EVENT) + } + return uint32(config.MAX_DATASOURCE_ID) +} + +func (e *EventStore) EncodeTo(protocol config.ExportProtocol, utags *utag.UniversalTagsManager, cfg *config.ExporterCfg) (interface{}, error) { + switch protocol { + case config.PROTOCOL_KAFKA: + tags := e.QueryUniversalTags(utags) + k8sLabels := utags.QueryCustomK8sLabels(e.PodID) + return exportercommon.EncodeToJson(e, int(e.DataSource()), cfg, tags, tags, k8sLabels, k8sLabels), nil + default: + return nil, fmt.Errorf("event does not support export to %s", protocol) + } +} + +func (e *EventStore) QueryUniversalTags(utags *utag.UniversalTagsManager) *utag.UniversalTags { + return utags.QueryUniversalTags( + e.RegionID, e.AZID, e.HostID, e.PodNSID, e.PodClusterID, e.SubnetID, e.VTAPID, + uint8(e.L3DeviceType), e.AutoServiceType, e.AutoInstanceType, + e.L3DeviceID, e.AutoServiceID, e.AutoInstanceID, e.PodNodeID, e.PodGroupID, e.PodID, uint32(e.L3EpcID), 0, e.ServiceID, + e.IsIPv4, e.IP4, e.IP6, + ) +} + +func (e *EventStore) GetFieldValueByOffsetAndKind(offset uintptr, kind reflect.Kind, fieldName string) interface{} { + return utils.GetValueByOffsetAndKind(uintptr(unsafe.Pointer(e)), offset, kind, fieldName) +} + +func (e *EventStore) TimestampUs() int64 { + return int64(e.EndTime) +} + var EventCounter uint32 func (e *EventStore) SetId(time, analyzerID uint32) { @@ -316,11 +360,13 @@ var eventPool = pool.NewLockFreePool(func() 
interface{} { }) func AcquireEventStore() *EventStore { - return eventPool.Get().(*EventStore) + e := eventPool.Get().(*EventStore) + e.Reset() + return e } func ReleaseEventStore(e *EventStore) { - if e == nil { + if e == nil || e.SubReferenceCount() { return } attributeNames := e.AttributeNames[:0] diff --git a/server/ingester/event/decoder/decoder.go b/server/ingester/event/decoder/decoder.go index da8452cd053f..21571d41a48c 100644 --- a/server/ingester/event/decoder/decoder.go +++ b/server/ingester/event/decoder/decoder.go @@ -30,6 +30,8 @@ import ( "github.com/deepflowio/deepflow/server/ingester/event/common" "github.com/deepflowio/deepflow/server/ingester/event/config" "github.com/deepflowio/deepflow/server/ingester/event/dbwriter" + "github.com/deepflowio/deepflow/server/ingester/exporters" + exporterscommon "github.com/deepflowio/deepflow/server/ingester/exporters/common" "github.com/deepflowio/deepflow/server/libs/codec" "github.com/deepflowio/deepflow/server/libs/eventapi" flow_metrics "github.com/deepflowio/deepflow/server/libs/flow-metrics" @@ -55,11 +57,13 @@ type Counter struct { } type Decoder struct { + index int eventType common.EventType resourceInfoTable *ResourceInfoTable platformData *grpc.PlatformInfoTable inQueue queue.QueueReader eventWriter *dbwriter.EventWriter + exporters *exporters.Exporters debugEnabled bool config *config.Config @@ -68,10 +72,12 @@ type Decoder struct { } func NewDecoder( + index int, eventType common.EventType, inQueue queue.QueueReader, eventWriter *dbwriter.EventWriter, platformData *grpc.PlatformInfoTable, + exporters *exporters.Exporters, config *config.Config, ) *Decoder { controllers := make([]net.IP, len(config.Base.ControllerIPs)) @@ -92,6 +98,7 @@ func NewDecoder( inQueue: inQueue, debugEnabled: log.IsEnabledFor(logging.DEBUG), eventWriter: eventWriter, + exporters: exporters, config: config, counter: &Counter{}, } @@ -116,6 +123,7 @@ func (d *Decoder) Run() { n := d.inQueue.Gets(buffer) for i := 0; i < n; i++ { if buffer[i] == nil { + d.export(nil) continue } d.counter.InCount++ @@ -247,9 +255,17 @@ func (d *Decoder) WritePerfEvent(vtapId uint16, e *pb.ProcEvent) { s.AppInstance = strconv.Itoa(int(e.Pid)) + d.export(s) d.eventWriter.Write(s) } +func (d *Decoder) export(item exporterscommon.ExportItem) { + if d.exporters == nil { + return + } + d.exporters.Put(d.eventType.DataSource(), d.index, item) +} + func (d *Decoder) handlePerfEvent(vtapId uint16, decoder *codec.SimpleDecoder) { for !decoder.IsEnd() { bytes := decoder.ReadBytes() diff --git a/server/ingester/event/event/event.go b/server/ingester/event/event/event.go index 8c0ec0d5ee48..b617583cffec 100644 --- a/server/ingester/event/event/event.go +++ b/server/ingester/event/event/event.go @@ -29,6 +29,7 @@ import ( "github.com/deepflowio/deepflow/server/ingester/event/config" "github.com/deepflowio/deepflow/server/ingester/event/dbwriter" "github.com/deepflowio/deepflow/server/ingester/event/decoder" + "github.com/deepflowio/deepflow/server/ingester/exporters" "github.com/deepflowio/deepflow/server/ingester/ingesterctl" "github.com/deepflowio/deepflow/server/libs/datatype" "github.com/deepflowio/deepflow/server/libs/grpc" @@ -51,14 +52,14 @@ type Eventor struct { PlatformDatas []*grpc.PlatformInfoTable } -func NewEvent(config *config.Config, resourceEventQueue *queue.OverwriteQueue, recv *receiver.Receiver, platformDataManager *grpc.PlatformDataManager) (*Event, error) { +func NewEvent(config *config.Config, resourceEventQueue *queue.OverwriteQueue, recv *receiver.Receiver, 
platformDataManager *grpc.PlatformDataManager, exporters *exporters.Exporters) (*Event, error) { manager := dropletqueue.NewManager(ingesterctl.INGESTERCTL_EVENT_QUEUE) resourceEventor, err := NewResouceEventor(resourceEventQueue, config, platformDataManager.GetMasterPlatformInfoTable()) if err != nil { return nil, err } - perfEventor, err := NewEventor(common.PERF_EVENT, config, recv, manager, platformDataManager) + perfEventor, err := NewEventor(common.PERF_EVENT, config, recv, manager, platformDataManager, exporters) if err != nil { return nil, err } @@ -68,7 +69,7 @@ func NewEvent(config *config.Config, resourceEventQueue *queue.OverwriteQueue, r return nil, err } - k8sEventor, err := NewEventor(common.K8S_EVENT, config, recv, manager, platformDataManager) + k8sEventor, err := NewEventor(common.K8S_EVENT, config, recv, manager, platformDataManager, nil) if err != nil { return nil, err } @@ -88,10 +89,12 @@ func NewResouceEventor(eventQueue *queue.OverwriteQueue, config *config.Config, return nil, err } d := decoder.NewDecoder( + 0, common.RESOURCE_EVENT, queue.QueueReader(eventQueue), eventWriter, platformTable, + nil, config, ) return &Eventor{ @@ -116,10 +119,12 @@ func NewAlarmEventor(config *config.Config, recv *receiver.Receiver, manager *dr return nil, err } d := decoder.NewDecoder( + 0, common.ALARM_EVENT, queue.QueueReader(decodeQueues.FixedMultiQueue[0]), eventWriter, platformTable, + nil, config, ) return &Eventor{ @@ -128,7 +133,7 @@ func NewAlarmEventor(config *config.Config, recv *receiver.Receiver, manager *dr }, nil } -func NewEventor(eventType common.EventType, config *config.Config, recv *receiver.Receiver, manager *dropletqueue.Manager, platformDataManager *grpc.PlatformDataManager) (*Eventor, error) { +func NewEventor(eventType common.EventType, config *config.Config, recv *receiver.Receiver, manager *dropletqueue.Manager, platformDataManager *grpc.PlatformDataManager, exporters *exporters.Exporters) (*Eventor, error) { var queueCount, queueSize int var msgType datatype.MessageType @@ -166,10 +171,12 @@ func NewEventor(eventType common.EventType, config *config.Config, recv *receive return nil, err } decoders[i] = decoder.NewDecoder( + i, eventType, queue.QueueReader(decodeQueues.FixedMultiQueue[i]), eventWriter, platformDatas[i], + exporters, config, ) } diff --git a/server/ingester/flow_log/exporters/README.md b/server/ingester/exporters/README.md similarity index 100% rename from server/ingester/flow_log/exporters/README.md rename to server/ingester/exporters/README.md diff --git a/server/ingester/exporters/common/interface.go b/server/ingester/exporters/common/interface.go new file mode 100644 index 000000000000..de8323eb3a9d --- /dev/null +++ b/server/ingester/exporters/common/interface.go @@ -0,0 +1,225 @@ +package common + +import ( + "fmt" + "net" + "reflect" + "strconv" + "strings" + "time" + + "github.com/deepflowio/deepflow/server/ingester/exporters/config" + utag "github.com/deepflowio/deepflow/server/ingester/exporters/universal_tag" + "github.com/deepflowio/deepflow/server/libs/utils" + logging "github.com/op/go-logging" +) + +var log = logging.MustGetLogger("exporters.interface") + +type ExportItem interface { + DataSource() uint32 + GetFieldValueByOffsetAndKind(offset uintptr, kind reflect.Kind, fieldName string) interface{} + EncodeTo(p config.ExportProtocol, utags *utag.UniversalTagsManager, cfg *config.ExporterCfg) (interface{}, error) + TimestampUs() int64 // us + Release() + AddReferenceCount() +} + +type EncodeItem interface { + 
GetFieldValueByOffsetAndKind(offset uintptr, kind reflect.Kind, fieldName string) interface{} + TimestampUs() int64 // us +} + +var funcMaps = map[string]interface{}{ + "IPv4String": IPv4String, + "IPv6String": IPv6String, + "MacString": MacString, +} + +func IPv4String(ip4 uint32) string { + ip := make(net.IP, 4) + ip[0] = byte(ip4 >> 24) + ip[1] = byte(ip4 >> 16) + ip[2] = byte(ip4 >> 8) + ip[3] = byte(ip4) + return ip.String() +} + +func IPv6String(ip6 net.IP) string { + return ip6.String() +} + +func MacString(mac uint64) string { + return utils.Uint64ToMac(mac).String() +} + +func GetFunc(funcName string) interface{} { + return funcMaps[funcName] +} + +func writeK8sLabels(sb *strings.Builder, keyName, valueName string, k8sLabels utag.Labels) { + if len(k8sLabels) == 0 { + return + } + valuesBuilder := &strings.Builder{} + sb.WriteString(`,"`) + sb.WriteString(keyName) + sb.WriteString(`":[`) + + valuesBuilder.WriteString(`,"`) + valuesBuilder.WriteString(valueName) + valuesBuilder.WriteString(`":[`) + + isFirst := true + for key, value := range k8sLabels { + if !isFirst { + sb.WriteString(`,`) + valuesBuilder.WriteString(`,`) + } + isFirst = false + sb.WriteString(`"`) + sb.WriteString(key) + sb.WriteString(`"`) + + valuesBuilder.WriteString(`"`) + valuesBuilder.WriteString(value) + valuesBuilder.WriteString(`"`) + + } + sb.WriteString(`]`) + valuesBuilder.WriteString(`]`) + + sb.WriteString(valuesBuilder.String()) +} + +func EncodeToJson(item EncodeItem, dataSourceId int, exporterCfg *config.ExporterCfg, uTags0, uTags1 *utag.UniversalTags, k8sLabels0, k8sLabels1 utag.Labels) string { + var sb = &strings.Builder{} + sb.WriteString("{\"datasource\":\"") + sb.WriteString(config.DataSourceID(dataSourceId).String()) + sb.WriteString(`"`) + + if dataSourceId >= int(config.MAX_DATASOURCE_ID) { + log.Errorf("export datasource wrong: datasourceid %d ", dataSourceId) + return "" + } + + isMapItem := config.DataSourceID(dataSourceId).IsMap() + var isString, isFloat64, isStringSlice, isFloat64Slice bool + var valueStr string + var valueFloat64 float64 + var stringSlice []string + var float64Slice []float64 + for _, structTags := range exporterCfg.ExportFieldStructTags[dataSourceId] { + isString, isFloat64, isStringSlice, isFloat64Slice = false, false, false, false + value := item.GetFieldValueByOffsetAndKind(structTags.Offset, structTags.DataType, structTags.FieldName) + if utils.IsNil(value) { + log.Debugf("%s is nil", structTags.FieldName) + continue + } + + keyStr := structTags.Name + if v, ok := value.(string); ok { + isString = true + valueStr = v + } else if v, ok := value.([]string); ok { + isStringSlice = true + stringSlice = v + } else if v, ok := value.([]float64); ok { + isFloat64Slice = true + float64Slice = v + } else if v, ok := utils.ConvertToFloat64(value); ok { + isFloat64 = true + valueFloat64 = v + } else { + isString = true + valueStr = fmt.Sprintf("%v", value) + } + + if structTags.ToStringFuncName != "" { + ret := structTags.ToStringFunc.Call([]reflect.Value{reflect.ValueOf(value)}) + valueStr = ret[0].String() + isString = true + } else if structTags.UniversalTagMapID > 0 && !exporterCfg.UniversalTagNotConvertToString { + // skip '_id' + if pos := strings.Index(keyStr, "_id"); pos != -1 { + keyStr = keyStr[:pos] + keyStr[pos+3:] // 3 is the length of '_id' + } + if strings.HasSuffix(structTags.Name, "_1") { + valueStr = uTags1.GetTagValue(structTags.UniversalTagMapID) + } else { + valueStr = uTags0.GetTagValue(structTags.UniversalTagMapID) + } + isString = true + } else if 
structTags.EnumFile != "" && !exporterCfg.EnumNotConvertToString { + if isString { + valueStr = structTags.EnumStringMap[valueStr] + } else if isFloat64 { + valueStr = structTags.EnumIntMap[int(valueFloat64)] + } + isString = true + } + + // do not export empty tags + if exporterCfg.TagOmitempty && + (structTags.CategoryBit&config.TAG) != 0 && + ((isString && valueStr == "") || (isFloat64 && valueFloat64 == 0) || + (isStringSlice && len(stringSlice) == 0)) { + continue + } + + // do not export empty metrics + if exporterCfg.MetricsOmitempty && + (structTags.CategoryBit&config.METRICS) != 0 && + ((isString && valueStr == "") || (isFloat64 && valueFloat64 == 0) || + (isFloat64Slice && len(float64Slice) == 0)) { + continue + } + + sb.WriteString(`,"`) + sb.WriteString(keyStr) + sb.WriteString(`":`) + if isString { + sb.WriteString(`"`) + sb.WriteString(valueStr) + sb.WriteString(`"`) + } else if isStringSlice { + sb.WriteString("[") + for i, v := range stringSlice { + if i != 0 { + sb.WriteString(`,`) + } + sb.WriteString(`"`) + sb.WriteString(v) + sb.WriteString(`"`) + } + sb.WriteString("]") + } else if isFloat64Slice { + sb.WriteString("[") + for i, v := range float64Slice { + if i != 0 { + sb.WriteString(`,`) + } + sb.WriteString(strconv.FormatFloat(v, 'f', -1, 64)) + } + sb.WriteString("]") + } else if isFloat64 { + sb.WriteString(strconv.FormatFloat(valueFloat64, 'f', -1, 64)) + } else { + log.Warningf("unreachable") + } + } + + if isMapItem { + writeK8sLabels(sb, "k8s_label_names_0", "k8s_label_values_0", k8sLabels0) + writeK8sLabels(sb, "k8s_label_names_1", "k8s_label_values_1", k8sLabels1) + } else { + writeK8sLabels(sb, "k8s_label_names", "k8s_label_values", k8sLabels0) + } + + sb.WriteString(`,"time_str":"`) + sb.WriteString(time.UnixMicro(item.TimestampUs()).String()) + sb.WriteString(`"`) + + sb.WriteString("}") + return sb.String() +} diff --git a/server/ingester/exporters/config/config.go b/server/ingester/exporters/config/config.go new file mode 100644 index 000000000000..003197eaed74 --- /dev/null +++ b/server/ingester/exporters/config/config.go @@ -0,0 +1,607 @@ +package config + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/IBM/sarama" + logging "github.com/op/go-logging" + yaml "gopkg.in/yaml.v2" + + "github.com/deepflowio/deepflow/server/ingester/config" + "github.com/deepflowio/deepflow/server/libs/datatype" + flow_metrics "github.com/deepflowio/deepflow/server/libs/flow-metrics" + "github.com/deepflowio/deepflow/server/libs/utils" +) + +var log = logging.MustGetLogger("exporters_config") + +const ( + DefaultExportQueueCount = 4 + DefaultExportQueueSize = 100000 + DefaultExportOtlpBatchSize = 32 + DefaultExportOtherBatchSize = 1024 + SecurityProtocol = "SASL_SSL" +) + +var DefaultExportCategory = []string{"service_info", "tracing_info", "network_layer", "flow_info", "transport_layer", "application_layer", "metrics"} + +type DataSourceID uint32 + +const ( + NETWORK_1M DataSourceID = DataSourceID(flow_metrics.NETWORK_1M) + NETWORK_MAP_1M = DataSourceID(flow_metrics.NETWORK_MAP_1M) + APPLICATION_1M = DataSourceID(flow_metrics.APPLICATION_1M) + APPLICATION_MAP_1M = DataSourceID(flow_metrics.APPLICATION_MAP_1M) + NETWORK_1S = DataSourceID(flow_metrics.NETWORK_1S) + NETWORK_MAP_1S = DataSourceID(flow_metrics.NETWORK_MAP_1S) + APPLICATION_1S = DataSourceID(flow_metrics.APPLICATION_1S) + APPLICATION_MAP_1S = DataSourceID(flow_metrics.APPLICATION_MAP_1S) + + PERF_EVENT DataSourceID = 
DataSourceID(flow_metrics.METRICS_TABLE_ID_MAX) + 1 + iota + L4_FLOW_LOG + L7_FLOW_LOG + + MAX_DATASOURCE_ID +) + +var dataSourceStrings = []string{ + NETWORK_1M: "flow_metrics.network.1m", + NETWORK_MAP_1M: "flow_metrics.network_map.1m", + APPLICATION_1M: "flow_metrics.application.1m", + APPLICATION_MAP_1M: "flow_metrics.application_map.1m", + NETWORK_1S: "flow_metrics.network.1s", + NETWORK_MAP_1S: "flow_metrics.network_map.1s", + APPLICATION_1S: "flow_metrics.application.1s", + APPLICATION_MAP_1S: "flow_metrics.application_map.1s", + PERF_EVENT: "event.perf_event", + L4_FLOW_LOG: "flow_log.l4_flow_log", + L7_FLOW_LOG: "flow_log.l7_flow_log", + MAX_DATASOURCE_ID: "invalid_datasource", +} + +func FlowLogMessageToDataSourceID(messageType datatype.MessageType) uint32 { + switch messageType { + case datatype.MESSAGE_TYPE_TAGGEDFLOW: + return uint32(L4_FLOW_LOG) + case datatype.MESSAGE_TYPE_PROTOCOLLOG: + return uint32(L7_FLOW_LOG) + } + return uint32(MAX_DATASOURCE_ID) +} + +func ToDataSourceID(str string) (DataSourceID, error) { + for i, v := range dataSourceStrings { + if v == str { + return DataSourceID(i), nil + } + } + return MAX_DATASOURCE_ID, fmt.Errorf("invalid datasource %s", str) +} + +func StringsToDataSourceBits(strs []string) uint32 { + ret := uint32(0) + for _, str := range strs { + t, err := ToDataSourceID(str) + if err != nil { + log.Warningf("unknown export datasource: %s", str) + continue + } + ret |= (1 << uint32(t)) + } + return ret +} + +func (d DataSourceID) String() string { + return dataSourceStrings[d] +} + +func (d DataSourceID) IsMap() bool { + switch d { + case NETWORK_1M, APPLICATION_1M, NETWORK_1S, APPLICATION_1S, PERF_EVENT: + return false + default: + return true + } +} + +// 'n|nm|a|am' are used to distinguish the different datasources under flow_metrics.* +func TagStringToDataSourceBits(s string) uint32 { + ret := uint32(0) + if s == "" { + return 0 + } + dss := strings.Split(s, "|") + for _, ds := range dss { + switch ds { + case "n": + ret |= 1 << uint32(NETWORK_1M) + ret |= 1 << uint32(NETWORK_1S) + case "nm": + ret |= 1 << uint32(NETWORK_MAP_1M) + ret |= 1 << uint32(NETWORK_MAP_1S) + case "a": + ret |= 1 << uint32(APPLICATION_1M) + ret |= 1 << uint32(APPLICATION_1S) + case "am": + ret |= 1 << uint32(APPLICATION_MAP_1M) + ret |= 1 << uint32(APPLICATION_MAP_1S) + } + } + return ret +} + +type OperatorID uint8 + +const ( + EQ OperatorID = iota + NEQ + IN + NOT_IN + WILDCARD_EQ + WILDCARD_NEQ + REGEXP_EQ + REGEXP_NEQ + + INVALID_OPERATOR_ID +) + +var operatorStrings = [INVALID_OPERATOR_ID]string{ + EQ: "=", + NEQ: "!=", + IN: "in", + NOT_IN: "not in", + WILDCARD_EQ: ":", + WILDCARD_NEQ: "!:", + REGEXP_EQ: "~", + REGEXP_NEQ: "!~", +} + +func (o OperatorID) String() string { + return operatorStrings[o] +} + +func operatorStringToID(op string) OperatorID { + for i, Operator := range operatorStrings { + if Operator == op { + return OperatorID(i) + } + } + log.Warningf("invalid operator(%s), supported operators %v", op, operatorStrings[:INVALID_OPERATOR_ID]) + return INVALID_OPERATOR_ID +} + +type TagFilter struct { + FieldName string `yaml:"field-name"` + Operator string `yaml:"operator"` + FieldValues []string `yaml:"field-values"` + + FieldFloat64s []float64 + OperatorId OperatorID + RegexpComplied *regexp.Regexp +} + +func (t *TagFilter) Validate() { + t.OperatorId = operatorStringToID(t.Operator) + if t.OperatorId == EQ || t.OperatorId == NEQ || t.OperatorId == IN || t.OperatorId == NOT_IN { + for _, str := range t.FieldValues { + if float64Value, err := 
strconv.ParseFloat(str, 64); err != nil { + t.FieldFloat64s = []float64{} + } else { + t.FieldFloat64s = append(t.FieldFloat64s, float64Value) + } + } + } else if t.OperatorId == REGEXP_EQ || t.OperatorId == REGEXP_NEQ || t.OperatorId == WILDCARD_EQ || t.OperatorId == WILDCARD_NEQ { + for _, str := range t.FieldValues { + // when wildcard matching, '*' needs to be converted to '.*' + if t.OperatorId == WILDCARD_EQ || t.OperatorId == WILDCARD_NEQ { + str = strings.ReplaceAll(str, "*", ".*") + } + regCompiled, err := regexp.Compile(str) + if err != nil { + continue + } + t.RegexpComplied = regCompiled + break + } + } +} + +func strInSlice(strs []string, str string) bool { + for _, v := range strs { + if str == v { + return true + } + } + return false +} + +func float64InSlice(floats []float64, float float64) bool { + for _, v := range floats { + if float == v { + return true + } + } + return false +} + +func (t *TagFilter) MatchStringValue(value string) bool { + switch t.OperatorId { + case EQ, IN: + return strInSlice(t.FieldValues, value) + case NEQ, NOT_IN: + return !strInSlice(t.FieldValues, value) + case REGEXP_EQ, WILDCARD_EQ: + if t.RegexpComplied != nil { + return t.RegexpComplied.MatchString(value) + } + case REGEXP_NEQ, WILDCARD_NEQ: + if t.RegexpComplied != nil { + return !t.RegexpComplied.MatchString(value) + } + } + return true +} + +func (t *TagFilter) MatchFloatValue(value float64) bool { + switch t.OperatorId { + case EQ, IN: + return float64InSlice(t.FieldFloat64s, value) + case NEQ, NOT_IN: + return !float64InSlice(t.FieldFloat64s, value) + } + return true +} + +func (t *TagFilter) MatchValue(value interface{}) bool { + var float64Value float64 + var isFloat64 bool + strValue, isStr := value.(string) + if !isStr { + float64Value, isFloat64 = utils.ConvertToFloat64(value) + } + + if !isStr && !isFloat64 { + return true + } + + if isStr { + return t.MatchStringValue(strValue) + } else if isFloat64 { + return t.MatchFloatValue(float64Value) + } + return true +} + +type StructTags struct { + DataSourceID uint32 + Name string // tag: 'json' + MapName string // tag: 'map_json' + FieldName string // field name + Offset uintptr + Category string // tag: 'category' + CategoryBit uint64 // gen from tag: 'category' + SubCategoryBit uint64 // gen from tag: 'sub' + ToStringFuncName string // tag: 'to_string' + ToStringFunc reflect.Value + DataType reflect.Kind + EnumFile string // tag: 'enumfile': e.g. l7_protocol, from server/querier/db_descriptions/clickhouse/tag/enum/* + EnumIntMap map[int]string // gen from content of `EnumFile` + EnumStringMap map[string]string // gen from content of `EnumFile` + UniversalTagMapID uint8 // gen from universal tags, region_id,az_id ... + Omitempty bool // tag: 'omitempty', not supported yet + TagDataSourceStr string // tag: 'datasource' + TagDataSourceBits uint32 // gen from 'TagDataSourceStr' + + // the field's TagFilters; if not empty, the filter should be calculated + TagFilters []TagFilter + + // gen by `ExportFields` + IsExportedField bool +} + +// ExporterCfg holds configs of different exporters. 
+type ExporterCfg struct { + Protocol string `yaml:"protocol"` + ExportProtocol ExportProtocol // gen by `Protocol` + DataSources []string `yaml:"data-sources"` + DataSourceBits uint32 // gen by `DataSources` + Endpoints []string `yaml:"endpoints"` + RandomEndpoints []string // gen by `Endpoints` + + QueueCount int `yaml:"queue-count"` + QueueSize int `yaml:"queue-size"` + BatchSize int `yaml:"batch-size"` + FlusTimeout int `yaml:"flush-timeout"` + TagOmitempty bool `yaml:"tag-omitempty"` + MetricsOmitempty bool `yaml:"metrics-omitempty"` + EnumNotConvertToString bool `yaml:"enum-not-convert-to-string"` + UniversalTagNotConvertToString bool `yaml:"universal-tag-not-convert-to-string"` + + TagFilters []TagFilter `yaml:"tag-filters"` + ExportFields []string `yaml:"export-fields"` + ExportFieldCategoryBits uint64 // gen by `ExportFields` + ExportFieldNames []string // gen by `ExportFields` + ExportFieldK8s []string // gen by `ExportFields` + + ExportFieldStructTags [MAX_DATASOURCE_ID][]StructTags // gen by `ExportFields` and init when exporting item first time + TagFieltertStructTags [MAX_DATASOURCE_ID][]StructTags // gen by `TagFilters` and init when exporting item first time + + // private configuration + ExtraHeaders map[string]string `yaml:"extra-headers"` + + // kafka private configuration + Sasl Sasl `yaml:"sasl"` +} + +type Sasl struct { + Enabled bool `yaml:"enabled"` + SecurityProtocol string `yaml:"security-protocol"` // only supports 'SASL_SSL' + Mechanism string `yaml:"sasl-mechanism"` // only supports 'PLAIN' + Username string `yaml:"username"` + Password string `yaml:"password"` +} + +func (s *Sasl) Validate() error { + if !s.Enabled { + return nil + } + if s.SecurityProtocol != SecurityProtocol { + log.Warningf("'security-protocol' only supports value %s", SecurityProtocol) + } + if s.Mechanism != sarama.SASLTypePlaintext { + log.Warningf("'sasl-mechanism' only supports value %s", sarama.SASLTypePlaintext) + } + return nil +} + +type ExportProtocol uint8 + +const ( + PROTOCOL_OTLP ExportProtocol = iota + PROTOCOL_PROMETHEUS + PROTOCOL_KAFKA + + MAX_PROTOCOL_ID +) + +var protocolToStrings = []string{ + PROTOCOL_OTLP: "opentelemetry", + PROTOCOL_PROMETHEUS: "prometheus", + PROTOCOL_KAFKA: "kafka", + MAX_PROTOCOL_ID: "unknown", +} + +func stringToExportProtocol(str string) ExportProtocol { + for i, v := range protocolToStrings { + if v == str { + return ExportProtocol(i) + } + } + log.Warningf("unsupported export protocol: %s, supported protocols %v", str, protocolToStrings[:MAX_PROTOCOL_ID]) + return MAX_PROTOCOL_ID +} + +func (p ExportProtocol) String() string { + return protocolToStrings[p] +} + +func (cfg *ExporterCfg) Validate() error { + l := len(cfg.Endpoints) + cfg.RandomEndpoints = make([]string, 0, l) + for _, v := range rand.Perm(l) { + cfg.RandomEndpoints = append(cfg.RandomEndpoints, cfg.Endpoints[v]) + } + + if cfg.BatchSize == 0 { + if cfg.Protocol == protocolToStrings[PROTOCOL_OTLP] { + cfg.BatchSize = DefaultExportOtlpBatchSize + } else { + cfg.BatchSize = DefaultExportOtherBatchSize + } + } + + if cfg.QueueCount == 0 { + cfg.QueueCount = DefaultExportQueueCount + } + if cfg.QueueSize == 0 { + cfg.QueueSize = DefaultExportQueueSize + } + if len(cfg.ExportFields) == 0 { + cfg.ExportFields = DefaultExportCategory + } + cfg.DataSourceBits = StringsToDataSourceBits(cfg.DataSources) + cfg.ExportFieldCategoryBits = StringsToCategoryBits(cfg.ExportFields) + cfg.ExportFieldNames = cfg.ExportFields + cfg.ExportProtocol = stringToExportProtocol(cfg.Protocol) + cfg.ExportFieldK8s = 
GetK8sLabelConfigs(cfg.ExportFields) + for i := range cfg.TagFilters { + cfg.TagFilters[i].Validate() + } + cfg.Sasl.Validate() + + return nil +} + +type ExportersConfig struct { + Exporters Config `yaml:"ingester"` +} + +type Config struct { + Base *config.Config + Exporters []ExporterCfg `yaml:"exporters"` +} + +func (c *Config) Validate() error { + for i := range c.Exporters { + if err := c.Exporters[i].Validate(); err != nil { + return err + } + } + return nil +} + +func bitsToString(bits uint64, strMap map[string]uint64) string { + ret := "" + for k, v := range strMap { + if bits&v != 0 { + if len(ret) == 0 { + ret = k + } else { + ret = ret + "," + k + } + } + } + return ret +} + +const ( + UNKNOWN_CATEGORY = 0 + + TAG uint64 = 1 << iota + FLOW_INFO + UNIVERSAL_TAG + CUSTOM_TAG + NATIVE_TAG + NETWORK_LAYER + TUNNEL_INFO + TRANSPORT_LAYER + APPLICATION_LAYER + SERVICE_INFO + TRACING_INFO + CAPTURE_INFO + EVENT_INFO // perf_event only + DATA_LINK_LAYER + K8S_LABEL + + METRICS + L3_THROUGHPUT // network*/l4_flow_log + L4_THROUGHPUT // network*/l4_flow_log + TCP_SLOW // network*/l4_flow_log + TCP_ERROR // network*/l4_flow_log + APPLICATION // network*/l4_flow_log + THROUGHPUT // application*/l7_flow_log + ERROR // application*/l7_flow_log + DELAY // all +) + +var categoryStringMap = map[string]uint64{ + "tag": TAG, // contains the subcategories before METRICS + "flow_info": FLOW_INFO, + "universal_tag": UNIVERSAL_TAG, + "custom_tag": CUSTOM_TAG, + "native_tag": NATIVE_TAG, + "network_layer": NETWORK_LAYER, + "tunnel_info": TUNNEL_INFO, + "transport_layer": TRANSPORT_LAYER, + "application_layer": APPLICATION_LAYER, + "service_info": SERVICE_INFO, + "tracing_info": TRACING_INFO, + "capture_info": CAPTURE_INFO, + "event_info": EVENT_INFO, + "data_link_layer": DATA_LINK_LAYER, + "k8s_label": K8S_LABEL, + + "metrics": METRICS, // contains the following subcategories + "l3_throughput": L3_THROUGHPUT, + "l4_throughput": L4_THROUGHPUT, + "tcp_slow": TCP_SLOW, + "tcp_error": TCP_ERROR, + "application": APPLICATION, + "throughput": THROUGHPUT, + "error": ERROR, + "delay": DELAY, +} + +func StringToCategoryBit(str string) uint64 { + if str == "" { + return UNKNOWN_CATEGORY + } + t, ok := categoryStringMap[str] + if !ok { + log.Warningf("unknown export category: %s", str) + } + return uint64(t) +} + +func StringsToCategoryBits(strs []string) uint64 { + ret := uint64(0) + for _, str := range strs { + if !strings.HasPrefix(str, "@") { + continue + } + // format: 'category.subcategory' + categorys := strings.Split(str[1:], ".") + category := categorys[0] + if len(categorys) > 1 { + category = categorys[1] + } + t, ok := categoryStringMap[category] + if !ok { + log.Warningf("unknown export category: %s", str) + continue + } + ret |= t + } + if ret&TAG != 0 { + ret |= FLOW_INFO | UNIVERSAL_TAG | CUSTOM_TAG | NATIVE_TAG | NETWORK_LAYER | TUNNEL_INFO | TRANSPORT_LAYER | APPLICATION_LAYER | SERVICE_INFO | TRACING_INFO | CAPTURE_INFO | DATA_LINK_LAYER | K8S_LABEL + } + if ret&METRICS != 0 { + ret |= L3_THROUGHPUT | L4_THROUGHPUT | TCP_SLOW | TCP_ERROR | APPLICATION | THROUGHPUT | ERROR | DELAY + } + return ret +} + +func GetK8sLabelConfigs(strs []string) []string { + ret := []string{} + k8sPrefix := "k8s_label." 
+ k8sPrefixLen := len(k8sPrefix) + for _, str := range strs { + if len(str) > k8sPrefixLen && strings.HasPrefix(str, k8sPrefix) { + ret = append(ret, str[k8sPrefixLen:]) + } else if len(str) > k8sPrefixLen+1 && strings.HasPrefix(str, "~"+k8sPrefix) { + ret = append(ret, "~"+str[k8sPrefixLen+1:]) + } else if str == k8sPrefix[:k8sPrefixLen-1] { + ret = append(ret, str) + } + } + return ret +} + +func CategoryBitsToString(bits uint64) string { + return bitsToString(bits, categoryStringMap) +} + +func Load(base *config.Config, path string) *Config { + config := &ExportersConfig{ + Exporters: Config{ + Base: base, + }, + } + if _, err := os.Stat(path); os.IsNotExist(err) { + log.Info("no config file, use defaults") + return &config.Exporters + } + configBytes, err := ioutil.ReadFile(path) + if err != nil { + log.Warning("Read config file error:", err) + config.Exporters.Validate() + return &config.Exporters + } + if err = yaml.Unmarshal(configBytes, &config); err != nil { + log.Error("Unmarshal yaml error:", err) + os.Exit(1) + } + + if err = config.Exporters.Validate(); err != nil { + log.Error(err) + os.Exit(1) + } + return &config.Exporters +} diff --git a/server/ingester/exporters/config/config_test.go b/server/ingester/exporters/config/config_test.go new file mode 100644 index 000000000000..e2f2b6a41f5c --- /dev/null +++ b/server/ingester/exporters/config/config_test.go @@ -0,0 +1,34 @@ +package config + +import ( + "io/ioutil" + "reflect" + "testing" + + yaml "gopkg.in/yaml.v2" +) + +type baseConfig struct { + Config ingesterConfig `yaml:"ingester"` +} + +type ingesterConfig struct { + ExportersCfg []ExporterCfg `yaml:"exporters"` +} + +func TestConfig(t *testing.T) { + ingesterCfg := baseConfig{} + configBytes, _ := ioutil.ReadFile("./config_test.yaml") + err := yaml.Unmarshal(configBytes, &ingesterCfg) + if err != nil { + t.Fatalf("yaml unmarshal failed: %v", err) + } + expect := baseConfig{ + Config: ingesterConfig{ + ExportersCfg: []ExporterCfg{}, + }, + } + if !reflect.DeepEqual(expect, ingesterCfg) { + t.Fatalf("yaml unmarshal not equal, expect: %v, got: %v", expect, ingesterCfg) + } +} diff --git a/server/ingester/exporters/config/config_test.yaml b/server/ingester/exporters/config/config_test.yaml new file mode 100644 index 000000000000..423dd80a7344 --- /dev/null +++ b/server/ingester/exporters/config/config_test.yaml @@ -0,0 +1,61 @@ +ingester: + exporters: + - protocol: opentelemetry + endpoints: [127.0.0.1:4317, 1.1.1.1:4317] # [protocol://]ip:port; an address that can be sent to successfully is chosen at random + data-sources: # $db_name.$table_name + - flow_log.l4_flow_log + - flow_log.l7_flow_log + queue-count: 4 + queue-size: 100000 + batch-size: 1024 # batch-size differs by protocol + flush-timeout: 10 + tag-filters: + - field-name: signal_source + operator: "=" # see the description for the supported operators + field-values: [] # a list with more than one value is supported only when the operator is IN or NOT IN + export-fields: # $field_name, etc. + - "@flow-info" # @$category + - "@universal-tag" # @$category + - ip_0 # $field_name + - k8s.label # $field_name + - k8s.annotation.app # $field_name.$sub_field_name + - ~k8s.env.(a.*|abc) # $field_name.$sub_field_name_regex + - "@metrics" # @$category + - "@metrics.delay" # @$category.$sub_category + - rtt # $field_name + # private configuration + extra-headers: # type: map[string]string, extra http request headers + key1: value1 + key2: value2 + - protocol: prometheus + endpoints: [http://1.2.3.4:9091] # [protocol://]ip:port + data-sources: + - flow_metrics.network.1s + queue-count: 4 + queue-size: 100000 + batch-size: 1024 + flush-timeout: 10 + tag-filters: [] # defaults to empty + export-fields: [] # must not be empty
+ # private configuration + extra-headers: # type: map[string]string, extra http request headers + key1: value1 + key2: value2 + - protocol: kafka + endpoints: [http://1.2.3.4:9091] # [protocol://]ip:port + data-sources: + - flow_metrics.network.1m + - flow_metrics.network.1s + queue-count: 4 + queue-size: 100000 + batch-size: 1024 + flush-timeout: 10 + tag-filters: [] # defaults to empty + export-fields: [] # must not be empty + # private configuration + sasl: + enabled: false # defaults to false + security-protocol: SASL_SSL # currently only SASL_SSL is supported + sasl-mechanism: PLAIN # currently only PLAIN is supported + username: aaa + password: aaa diff --git a/server/ingester/exporters/enum_translation/enum_translation.go b/server/ingester/exporters/enum_translation/enum_translation.go new file mode 100644 index 000000000000..680983f5a69f --- /dev/null +++ b/server/ingester/exporters/enum_translation/enum_translation.go @@ -0,0 +1,80 @@ +package enum_translation + +import ( + "fmt" + "strconv" + "strings" + + "github.com/deepflowio/deepflow/server/querier/db_descriptions" + logging "github.com/op/go-logging" +) + +var log = logging.MustGetLogger("exporters.translation") + +type EnumTranslation struct { + intMaps map[string]map[int]string + stringMaps map[string]map[string]string +} + +func NewEnumTranslation() *EnumTranslation { + t := &EnumTranslation{ + intMaps: make(map[string]map[int]string), + stringMaps: make(map[string]map[string]string), + } + err := t.Load() + if err != nil { + log.Error(err) + } + return t +} + +func (t *EnumTranslation) GetMaps(file string) (map[int]string, map[string]string) { + return t.intMaps[file], t.stringMaps[file] +} + +func (t *EnumTranslation) Load() error { + files, err := db_descriptions.EnumFiles.ReadDir("clickhouse/tag/enum") + if err != nil { + return fmt.Errorf("error reading directory: %s", err) + } + + for _, file := range files { + filename := file.Name() + if !strings.HasSuffix(filename, ".ch") { + content, err := db_descriptions.EnumFiles.ReadFile("clickhouse/tag/enum/" + filename) + if err != nil { + log.Warningf("error reading file %s: %v", filename, err) + continue + } + stringMap, intMap := parseContent(string(content)) + if strings.HasSuffix(filename, ".en") { + filename = filename[:len(filename)-3] + } + t.intMaps[filename] = intMap + t.stringMaps[filename] = stringMap + } + } + return nil +} + +func parseContent(content string) (map[string]string, map[int]string) { + stringMap := make(map[string]string) + intMap := make(map[int]string) + + lines := strings.Split(content, "\n") + for _, line := range lines { + if !strings.HasPrefix(line, "#") && line != "" { + fields := strings.Split(line, ",") + if len(fields) >= 2 { + key := strings.TrimSpace(fields[0]) + value := strings.TrimSpace(fields[1]) + + if i, err := strconv.Atoi(key); err == nil { + intMap[i] = value + } + stringMap[key] = value + } + } + } + return stringMap, intMap +} diff --git a/server/ingester/exporters/exporters.go b/server/ingester/exporters/exporters.go new file mode 100644 index 000000000000..d8d21e84787d --- /dev/null +++ b/server/ingester/exporters/exporters.go @@ -0,0 +1,357 @@ +/* + * Copyright (c) 2024 Yunshan Networks + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
diff --git a/server/ingester/exporters/exporters.go b/server/ingester/exporters/exporters.go
new file mode 100644
index 000000000000..d8d21e84787d
--- /dev/null
+++ b/server/ingester/exporters/exporters.go
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2024 Yunshan Networks
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package exporters
+
+import (
+	"reflect"
+	"strings"
+
+	logging "github.com/op/go-logging"
+
+	"github.com/deepflowio/deepflow/server/ingester/exporters/common"
+	"github.com/deepflowio/deepflow/server/ingester/exporters/config"
+	"github.com/deepflowio/deepflow/server/ingester/exporters/enum_translation"
+	"github.com/deepflowio/deepflow/server/ingester/exporters/kafka_exporter"
+	"github.com/deepflowio/deepflow/server/ingester/exporters/otlp_exporter"
+	"github.com/deepflowio/deepflow/server/ingester/exporters/prometheus_exporter"
+	"github.com/deepflowio/deepflow/server/ingester/exporters/universal_tag"
+	"github.com/deepflowio/deepflow/server/libs/queue"
+	"github.com/deepflowio/deepflow/server/libs/utils"
+)
+
+var log = logging.MustGetLogger("exporters")
+
+const (
+	PUT_BATCH_SIZE               = 1024
+	MAX_EXPORTERS_PER_DATASOURCE = 8
+)
+
+type Exporter interface {
+	// Start starts an exporter worker
+	Start()
+	// Close closes an exporter worker
+	Close()
+
+	// Put sends data to the exporter worker. The worker decides what to do next, e.g.:
+	// - send it out synchronously.
+	// - store it in a queue and handle it later.
+	Put(items ...interface{})
+}
+
+type ExportersCache []interface{}
+
+type Exporters struct {
+	config                  *config.Config
+	universalTagsManagerMap map[string]*universal_tag.UniversalTagsManager
+	translation             *enum_translation.EnumTranslation
+	exporters               []Exporter
+	dataSourceExporters     [config.MAX_DATASOURCE_ID][]Exporter
+	dataSourceExporterCfgs  [config.MAX_DATASOURCE_ID][]*config.ExporterCfg
+	putCaches               []ExportersCache // caches for batched Put(): multiple decoders call Put(), and one item may go to multiple exporters
+}
+
+func NewExporters(cfg *config.Config) *Exporters {
+	if len(cfg.Exporters) == 0 {
+		log.Info("exporters config is empty")
+		return nil
+	}
+	log.Infof("init exporters: %+v", cfg.Exporters)
+
+	translation := enum_translation.NewEnumTranslation()
+	putCaches := make([]ExportersCache, config.MAX_DATASOURCE_ID*queue.MAX_QUEUE_COUNT*MAX_EXPORTERS_PER_DATASOURCE)
+	exporters := make([]Exporter, 0)
+	dataSourceExporters := [config.MAX_DATASOURCE_ID][]Exporter{}
+	dataSourceExporterCfgs := [config.MAX_DATASOURCE_ID][]*config.ExporterCfg{}
+	var exporter Exporter
+	var universalTagManager *universal_tag.UniversalTagsManager
+	uTagManagerMap := make(map[string]*universal_tag.UniversalTagsManager)
+	for i, exporterCfg := range cfg.Exporters {
+		// exporters with the same ExportFieldK8s can share one universalTagManager
+		uTagKey := strings.Join(exporterCfg.ExportFieldK8s, "-")
+		universalTagManager = uTagManagerMap[uTagKey]
+		if universalTagManager == nil {
+			universalTagManager = universal_tag.NewUniversalTagsManager(exporterCfg.ExportFieldK8s, cfg.Base)
+			uTagManagerMap[uTagKey] = universalTagManager
+		}
+		switch exporterCfg.ExportProtocol {
+		case config.PROTOCOL_OTLP:
+			exporter = otlp_exporter.NewOtlpExporter(i, &cfg.Exporters[i], universalTagManager)
+		case config.PROTOCOL_PROMETHEUS:
+			exporter = prometheus_exporter.NewPrometheusExporter(i, &cfg.Exporters[i], universalTagManager)
+		case config.PROTOCOL_KAFKA:
+			exporter = kafka_exporter.NewKafkaExporter(i, &cfg.Exporters[i], universalTagManager)
+		default:
+			exporter = nil
+			log.Warningf("unsupported export protocol %s", exporterCfg.Protocol)
+		}
+		if exporter == nil {
+			continue
+		}
+		exporters = append(exporters, exporter)
+		for _, dataSource := range exporterCfg.DataSources {
+			dataSourceId, err := config.ToDataSourceID(dataSource)
+			if err != nil {
+				log.Warning(err)
+				continue
+			}
+			dataSourceExporters[dataSourceId] = append(dataSourceExporters[dataSourceId], exporter)
+			dataSourceExporterCfgs[dataSourceId] = append(dataSourceExporterCfgs[dataSourceId], &cfg.Exporters[i])
+		}
+	}
+
+	return &Exporters{
+		config:                  cfg,
+		universalTagsManagerMap: uTagManagerMap,
+		exporters:               exporters,
+		putCaches:               putCaches,
+		dataSourceExporters:     dataSourceExporters,
+		dataSourceExporterCfgs:  dataSourceExporterCfgs,
+		translation:             translation,
+	}
+}
+
+func (es *Exporters) Start() {
+	for _, v := range es.universalTagsManagerMap {
+		v.Start()
+	}
+	for _, e := range es.exporters {
+		e.Start()
+	}
+}
+
+func (es *Exporters) Close() error {
+	for _, v := range es.universalTagsManagerMap {
+		v.Close()
+	}
+	for _, e := range es.exporters {
+		e.Close()
+	}
+	return nil
+}
+
+func GetTagFilters(field string, tagFilters []config.TagFilter) []config.TagFilter {
+	tagFilter := []config.TagFilter{}
+	for _, filter := range tagFilters {
+		if filter.FieldName == field {
+			tagFilter = append(tagFilter, filter)
+		}
+	}
+	return tagFilter
+}
+
+func IsExportField(tag *config.StructTags, exportFieldCategoryBits uint64, exportFieldNames []string) bool {
+	if tag.Name == "" {
+		return false
+	}
+	// tags of flow_metrics are shared by the app, flow and usage documents,
+	// so only export a tag when it belongs to this document's datasource
+	if tag.TagDataSourceBits != 0 && tag.TagDataSourceBits&(1<<tag.DataSourceID) == 0 {
+		return false
+	}
+
+	if tag.CategoryBit&exportFieldCategoryBits != 0 || tag.SubCategoryBit&exportFieldCategoryBits != 0 {
+		return true
+	}
+
+	for _, name := range exportFieldNames {
+		if name == tag.Name {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (es *Exporters) initStructTags(item interface{}, dataSourceId uint32, exporterCfg *config.ExporterCfg) {
+	if exporterCfg.TagFieltertStructTags[dataSourceId] == nil {
+		t := reflect.TypeOf(item)
+		if t.Kind() == reflect.Pointer {
+			t = t.Elem()
+		}
+		if t.Kind() != reflect.Struct {
+			log.Warningf("item is not struct %v", item)
+			return
+		}
+		num := t.NumField()
+
+		all := make([]config.StructTags, 0, num)
+		fields := make([]reflect.StructField, 0, num)
+		structFields := []reflect.StructField{}
+		for i := 0; i < num; i++ {
+			field := t.Field(i)
+			dataType := field.Type.Kind()
+			if dataType == reflect.Struct {
+				structFields = append(structFields, field)
+			} else {
+				fields = append(fields, field)
+			}
+		}
+
+		// add the fields of all sub structs/interfaces
+		for len(structFields) != 0 {
+			sfs := structFields
+			structFields = []reflect.StructField{}
+			for _, field := range sfs {
+				fType := field.Type
+				if fType.Kind() != reflect.Struct {
+					log.Warningf("ftype is not struct %v", fType)
+					continue
+				}
+				subNum := fType.NumField()
+				for i := 0; i < subNum; i++ {
+					subField := fType.Field(i)
+					dataType := subField.Type.Kind()
+					// a sub field's offset is relative to its struct, so add the parent field's offset
+					subField.Offset += field.Offset
+					if dataType == reflect.Struct {
+						structFields = append(structFields, subField)
+					} else {
+						fields = append(fields, subField)
+					}
+				}
+			}
+		}
+
+		for _, field := range fields {
+			dataType := field.Type.Kind()
+			name := field.Tag.Get("json")
+			mapName := field.Tag.Get("map_json")
field.Tag.Get("category") + subCategory := field.Tag.Get("sub") + + categoryBit := config.StringToCategoryBit(category) + subCategoryBit := config.StringToCategoryBit(subCategory) + omitempty := false + if field.Tag.Get("omitempty") != "" { + omitempty = true + } + toStringFuncName := field.Tag.Get("to_string") + toStringFunc := reflect.ValueOf(common.GetFunc(toStringFuncName)) + + dataSourceStr := field.Tag.Get("datasource") + dataSourceBits := config.TagStringToDataSourceBits(dataSourceStr) + + // enum files in "server/querier/db_descriptions/clickhouse/tag/enum/" + enumFile := field.Tag.Get("enumfile") + structTag := config.StructTags{ + DataSourceID: dataSourceId, + Name: name, + MapName: mapName, + FieldName: field.Name, + Category: category + "." + subCategory, + CategoryBit: categoryBit, + SubCategoryBit: subCategoryBit, + Offset: field.Offset, + DataType: dataType, + Omitempty: omitempty, + EnumFile: enumFile, + ToStringFuncName: toStringFuncName, + ToStringFunc: toStringFunc, + UniversalTagMapID: universal_tag.StringToUniversalTagID(name), + TagFilters: GetTagFilters(name, exporterCfg.TagFilters), + TagDataSourceBits: dataSourceBits, + } + if enumFile != "" { + structTag.EnumIntMap, structTag.EnumStringMap = es.translation.GetMaps(enumFile) + } + structTag.IsExportedField = IsExportField(&structTag, exporterCfg.ExportFieldCategoryBits, exporterCfg.ExportFieldNames) + all = append(all, structTag) + } + + tagFieltertStructTags := []config.StructTags{} + exportFieldStructTags := []config.StructTags{} + for _, structTag := range all { + if len(structTag.TagFilters) > 0 { + tagFieltertStructTags = append(tagFieltertStructTags, structTag) + } + if structTag.IsExportedField { + exportFieldStructTags = append(exportFieldStructTags, structTag) + } + } + exporterCfg.TagFieltertStructTags[dataSourceId] = tagFieltertStructTags + exporterCfg.ExportFieldStructTags[dataSourceId] = exportFieldStructTags + + dsid := config.DataSourceID(dataSourceId) + log.Infof("export protocl %s datasource %s, get all structTags: %+v", exporterCfg.Protocol, dsid.String(), all) + log.Infof("export protocl %s datasource %s, get tagfilter structTags: %+v", exporterCfg.Protocol, dsid.String(), tagFieltertStructTags) + log.Infof("export protocl %s datasource %s, get exportfield structTags: %+v", exporterCfg.Protocol, dsid.String(), exportFieldStructTags) + } +} + +func (es *Exporters) IsExportItem(item common.ExportItem, dataSourceId uint32, exporterCfg *config.ExporterCfg) bool { + es.initStructTags(item, dataSourceId, exporterCfg) + for _, structTag := range exporterCfg.TagFieltertStructTags[dataSourceId] { + value := item.GetFieldValueByOffsetAndKind(structTag.Offset, structTag.DataType, structTag.FieldName) + for _, tagFilter := range structTag.TagFilters { + if !tagFilter.MatchValue(value) { + return false + } + } + } + + return true +} + +func (es *Exporters) getPutCache(dataSourceId, decoderId, exporterId int) *ExportersCache { + return &es.putCaches[(dataSourceId*queue.MAX_QUEUE_COUNT+decoderId)*MAX_EXPORTERS_PER_DATASOURCE+exporterId] +} + +// parallel put +func (es *Exporters) Put(dataSourceId uint32, decoderIndex int, item common.ExportItem) { + if utils.IsNil(item) { + es.Flush(int(dataSourceId), decoderIndex) + return + } + + if dataSourceId != item.DataSource() { + log.Warningf("datasourceId %d != itemDatasoure %d", dataSourceId, item.DataSource()) + return + } + if es.dataSourceExporters[dataSourceId] == nil { + return + } + exporters := es.dataSourceExporters[dataSourceId] + if len(exporters) == 0 { + 
diff --git a/server/ingester/exporters/kafka_exporter/exporter.go b/server/ingester/exporters/kafka_exporter/exporter.go
new file mode 100644
index 000000000000..b4564b6a119f
--- /dev/null
+++ b/server/ingester/exporters/kafka_exporter/exporter.go
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2024 Yunshan Networks
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka_exporter
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/IBM/sarama"
+	logging "github.com/op/go-logging"
+
+	ingester_common "github.com/deepflowio/deepflow/server/ingester/common"
+	"github.com/deepflowio/deepflow/server/ingester/exporters/common"
+	exporters_cfg "github.com/deepflowio/deepflow/server/ingester/exporters/config"
+	utag "github.com/deepflowio/deepflow/server/ingester/exporters/universal_tag"
+	"github.com/deepflowio/deepflow/server/ingester/ingesterctl"
+	"github.com/deepflowio/deepflow/server/libs/debug"
+	"github.com/deepflowio/deepflow/server/libs/queue"
+	"github.com/deepflowio/deepflow/server/libs/stats"
+	"github.com/deepflowio/deepflow/server/libs/utils"
+)
+
+var log = logging.MustGetLogger("kafka_exporter")
+
+const (
+	QUEUE_BATCH_COUNT = 1024
+)
+
+type KafkaExporter struct {
+	index                int
+	Addr                 string
+	dataQueues           queue.FixedMultiQueue
+	queueCount           int
+	producers            []sarama.SyncProducer
+	universalTagsManager *utag.UniversalTagsManager
+	config               *exporters_cfg.ExporterCfg
+	counter              *Counter
+	lastCounter          Counter
+	running              bool
+
+	utils.Closable
+}
+
+type Counter struct {
+	RecvCounter          int64 `statsd:"recv-count"`
+	SendCounter          int64 `statsd:"send-count"`
+	SendBatchCounter     int64 `statsd:"send-batch-count"`
+	ExportUsedTimeNs     int64 `statsd:"export-used-time-ns"`
+	DropCounter          int64 `statsd:"drop-count"`
+	DropBatchCounter     int64 `statsd:"drop-batch-count"`
+	DropNoTraceIDCounter int64 `statsd:"drop-no-traceid-count"`
+}
+
+func (e *KafkaExporter) GetCounter() interface{} {
+	var counter Counter
+	counter, *e.counter = *e.counter, Counter{}
+	e.lastCounter = counter
+	return &counter
+}
+
+func NewKafkaExporter(index int, config *exporters_cfg.ExporterCfg, universalTagsManager *utag.UniversalTagsManager) *KafkaExporter {
+	dataQueues := queue.NewOverwriteQueues(
+		fmt.Sprintf("kafka_exporter_%d", index), queue.HashKey(config.QueueCount), config.QueueSize,
+		queue.OptionFlushIndicator(time.Second),
+		queue.OptionRelease(func(p interface{}) { p.(common.ExportItem).Release() }),
+		ingester_common.QUEUE_STATS_MODULE_INGESTER)
+
+	exporter := &KafkaExporter{
+		index:                index,
+		dataQueues:           dataQueues,
+		queueCount:           config.QueueCount,
+		universalTagsManager: universalTagsManager,
+		producers:            make([]sarama.SyncProducer, config.QueueCount),
+		config:               config,
+		counter:              &Counter{},
+	}
+	debug.ServerRegisterSimple(ingesterctl.CMD_KAFKA_EXPORTER, exporter)
+	ingester_common.RegisterCountableForIngester("exporter", exporter, stats.OptionStatTags{
+		"type": "kafka", "index": strconv.Itoa(index)})
+	log.Infof("kafka exporter %d created", index)
+	return exporter
+}
+
+func (e *KafkaExporter) Put(items ...interface{}) {
+	e.counter.RecvCounter++
+	e.dataQueues.Put(queue.HashKey(int(e.counter.RecvCounter)%e.queueCount), items...)
+}
+
+func (e *KafkaExporter) Start() {
+	if e.running {
+		log.Warningf("kafka exporter %d already running", e.index)
+		return
+	}
+	e.running = true
+	for i := 0; i < e.queueCount; i++ {
+		go e.queueProcess(i)
+	}
+	log.Infof("kafka exporter %d started %d queues", e.index, e.queueCount)
+}
+
+func (e *KafkaExporter) Close() {
+	e.Closable.Close()
+	e.running = false
+	for i := 0; i < e.queueCount; i++ {
+		if e.producers[i] != nil {
+			e.producers[i].Close()
+			e.producers[i] = nil
+		}
+	}
+	log.Infof("kafka exporter %d stopping", e.index)
+}
+
+func (e *KafkaExporter) newProducer(id int) error {
+	// create producer config
+	config := sarama.NewConfig()
+	config.Producer.RequiredAcks = sarama.WaitForAll
+	config.Producer.Retry.Max = 3
+	config.Producer.Return.Successes = true
+	config.Producer.Compression = sarama.CompressionSnappy
+
+	config.Net.SASL.Enable = e.config.Sasl.Enabled
+	config.Net.SASL.Mechanism = sarama.SASLTypePlaintext
+	config.Net.SASL.User = e.config.Sasl.Username
+	config.Net.SASL.Password = e.config.Sasl.Password
+
+	producer, err := sarama.NewSyncProducer(e.config.Endpoints, config)
+	if err != nil {
+		return err
+	}
+
+	e.producers[id] = producer
+	return nil
+}
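Not part of the patch — a standalone sketch of the sarama producer config newProducer builds, useful for checking SASL settings against a broker by hand. Broker address, topic and credentials are placeholders; note that newProducer above always uses the PLAIN mechanism, matching the "currently only PLAIN is supported" comment in the YAML.

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Return.Successes = true // required by SyncProducer
	cfg.Producer.Compression = sarama.CompressionSnappy

	// placeholder credentials; a real SASL_SSL setup would also need cfg.Net.TLS.Enable = true
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypePlaintext
	cfg.Net.SASL.User = "aaa"
	cfg.Net.SASL.Password = "aaa"

	producer, err := sarama.NewSyncProducer([]string{"1.2.3.4:9092"}, cfg)
	if err != nil {
		fmt.Println("connect failed:", err)
		return
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "flow_metrics.network.1s", // the exporter uses the datasource name as topic
		Value: sarama.StringEncoder(`{"hello":"kafka"}`),
	})
	fmt.Println(partition, offset, err)
}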
fmt.Sprintf("kafka_exporter_%d", index), queue.HashKey(config.QueueCount), config.QueueSize, + queue.OptionFlushIndicator(time.Second), + queue.OptionRelease(func(p interface{}) { p.(common.ExportItem).Release() }), + ingester_common.QUEUE_STATS_MODULE_INGESTER) + + exporter := &KafkaExporter{ + index: index, + dataQueues: dataQueues, + queueCount: config.QueueCount, + universalTagsManager: universalTagsManager, + producers: make([]sarama.SyncProducer, config.QueueCount), + config: config, + counter: &Counter{}, + } + debug.ServerRegisterSimple(ingesterctl.CMD_KAFKA_EXPORTER, exporter) + ingester_common.RegisterCountableForIngester("exporter", exporter, stats.OptionStatTags{ + "type": "kafka", "index": strconv.Itoa(index)}) + log.Infof("kafka exporter %d created", index) + return exporter +} + +func (e *KafkaExporter) Put(items ...interface{}) { + e.counter.RecvCounter++ + e.dataQueues.Put(queue.HashKey(int(e.counter.RecvCounter)%e.queueCount), items...) +} + +func (e *KafkaExporter) Start() { + if e.running { + log.Warningf("kafka exporter %d already running", e.index) + return + } + e.running = true + for i := 0; i < e.queueCount; i++ { + go e.queueProcess(int(i)) + } + log.Infof("kafka exporter %d started %d queue", e.index, e.queueCount) +} + +func (e *KafkaExporter) Close() { + e.Closable.Close() + e.running = false + for i := 0; i < e.queueCount; i++ { + if e.producers[i] != nil { + e.producers[i].Close() + e.producers[i] = nil + } + } + log.Infof("kafka exporter %d stopping", e.index) +} + +func (e *KafkaExporter) newProducer(id int) error { + // create producer config + config := sarama.NewConfig() + config.Producer.RequiredAcks = sarama.WaitForAll + config.Producer.Retry.Max = 3 + config.Producer.Return.Successes = true + config.Producer.Compression = sarama.CompressionSnappy + + config.Net.SASL.Enable = e.config.Sasl.Enabled + config.Net.SASL.Mechanism = sarama.SASLTypePlaintext + config.Net.SASL.User = e.config.Sasl.Username + config.Net.SASL.Password = e.config.Sasl.Password + + producer, err := sarama.NewSyncProducer(e.config.Endpoints, config) + if err != nil { + return err + } + + e.producers[id] = producer + return nil +} + +func (e *KafkaExporter) queueProcess(queueID int) { + items := make([]interface{}, QUEUE_BATCH_COUNT) + batch := []*sarama.ProducerMessage{} + + for e.running { + n := e.dataQueues.Gets(queue.HashKey(queueID), items) + for i, item := range items[:n] { + if item == nil { + e.exportBatch(queueID, batch) + batch = batch[:0] + continue + } + exportItem, ok := item.(common.ExportItem) + if !ok { + e.counter.DropCounter++ + continue + } + + json, err := exportItem.EncodeTo(exporters_cfg.PROTOCOL_KAFKA, e.universalTagsManager, e.config) + if err != nil { + if e.counter.DropCounter == 0 { + log.Warningf("kafka encode failed, err: %s", err) + } + e.counter.DropCounter++ + exportItem.Release() + continue + } + + jsonStr := json.(string) + if i%100 == 0 { + log.Infof("kafka: %s \n %+v", jsonStr, item) + } + batch = append(batch, + &sarama.ProducerMessage{ + Topic: exporters_cfg.DataSourceID(exportItem.DataSource()).String(), + Key: nil, + Value: sarama.ByteEncoder(utils.Slice(jsonStr)), + Timestamp: time.UnixMicro(exportItem.TimestampUs()), + }, + ) + if len(batch) >= e.config.BatchSize { + e.exportBatch(queueID, batch) + batch = batch[:0] + } + exportItem.Release() + } + } +} + +func (e *KafkaExporter) exportBatch(queueID int, batch []*sarama.ProducerMessage) { + defer func() { + if r := recover(); r != nil { + log.Warningf("kafka export error: %s", r) + } + }() 
+ + if len(batch) == 0 { + return + } + + if utils.IsNil(e.producers[queueID]) { + err := e.newProducer(queueID) + if err != nil { + if e.counter.DropCounter == 0 { + log.Warningf("exporter %d queue %d new kafka producer failed. err: %s", e.index, queueID, err) + } + e.counter.DropCounter += int64(len(batch)) + e.counter.DropBatchCounter++ + return + } + } + + producer := e.producers[queueID] + + now := time.Now() + if err := producer.SendMessages(batch); err != nil { + if e.counter.DropCounter == 0 { + log.Warningf("exporter %d send kafka messages failed. err: %s", e.index, err) + } + e.counter.DropCounter += int64(len(batch)) + e.counter.DropBatchCounter++ + } else { + e.counter.SendCounter += int64(len(batch)) + e.counter.SendBatchCounter++ + } + + e.counter.ExportUsedTimeNs += int64(time.Since(now)) +} + +func (e *KafkaExporter) HandleSimpleCommand(op uint16, arg string) string { + return fmt.Sprintf("kafka exporter %d last 10s counter: %+v", e.index, e.lastCounter) +} diff --git a/server/ingester/exporters/otlp_exporter/exporter.go b/server/ingester/exporters/otlp_exporter/exporter.go new file mode 100644 index 000000000000..8f5fc40bc49f --- /dev/null +++ b/server/ingester/exporters/otlp_exporter/exporter.go @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2024 Yunshan Networks + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package otlp_exporter + +import ( + "fmt" + "strconv" + "strings" + "time" + + logging "github.com/op/go-logging" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + + ingester_common "github.com/deepflowio/deepflow/server/ingester/common" + "github.com/deepflowio/deepflow/server/ingester/exporters/common" + exporters_cfg "github.com/deepflowio/deepflow/server/ingester/exporters/config" + utag "github.com/deepflowio/deepflow/server/ingester/exporters/universal_tag" + "github.com/deepflowio/deepflow/server/ingester/ingesterctl" + "github.com/deepflowio/deepflow/server/libs/debug" + "github.com/deepflowio/deepflow/server/libs/queue" + "github.com/deepflowio/deepflow/server/libs/stats" + "github.com/deepflowio/deepflow/server/libs/utils" +) + +var log = logging.MustGetLogger("otlp_exporter") + +const ( + QUEUE_BATCH_COUNT = 1024 +) + +type OtlpExporter struct { + index int + Addr string + dataQueues queue.FixedMultiQueue + queueCount int + grpcExporters []ptraceotlp.GRPCClient + grpcConns []*grpc.ClientConn + grpcFailedCounters []int + universalTagsManager *utag.UniversalTagsManager + config *exporters_cfg.ExporterCfg + counter *Counter + lastCounter Counter + running bool + + utils.Closable +} + +type Counter struct { + RecvCounter int64 `statsd:"recv-count"` + SendCounter int64 `statsd:"send-count"` + SendBatchCounter int64 `statsd:"send-batch-count"` + ExportUsedTimeNs int64 `statsd:"export-used-time-ns"` + DropCounter int64 `statsd:"drop-count"` + DropBatchCounter int64 `statsd:"drop-batch-count"` +} + +func (e *OtlpExporter) GetCounter() interface{} { + var counter Counter + counter, *e.counter = *e.counter, Counter{} + e.lastCounter = counter + return &counter +} + +func NewOtlpExporter(index int, config *exporters_cfg.ExporterCfg, universalTagsManager *utag.UniversalTagsManager) *OtlpExporter { + dataQueues := queue.NewOverwriteQueues( + fmt.Sprintf("otlp_exporter_%d", index), queue.HashKey(config.QueueCount), config.QueueSize, + queue.OptionFlushIndicator(time.Second), + queue.OptionRelease(func(p interface{}) { p.(common.ExportItem).Release() }), + ingester_common.QUEUE_STATS_MODULE_INGESTER) + + exporter := &OtlpExporter{ + index: index, + dataQueues: dataQueues, + queueCount: config.QueueCount, + universalTagsManager: universalTagsManager, + grpcConns: make([]*grpc.ClientConn, config.QueueCount), + grpcFailedCounters: make([]int, config.QueueCount), + grpcExporters: make([]ptraceotlp.GRPCClient, config.QueueCount), + config: config, + counter: &Counter{}, + } + debug.ServerRegisterSimple(ingesterctl.CMD_OTLP_EXPORTER, exporter) + ingester_common.RegisterCountableForIngester("exporter", exporter, stats.OptionStatTags{ + "type": "otlp", "index": strconv.Itoa(index)}) + log.Infof("otlp exporter %d created", index) + return exporter +} + +func (e *OtlpExporter) Put(items ...interface{}) { + e.counter.RecvCounter++ + e.dataQueues.Put(queue.HashKey(int(e.counter.RecvCounter)%e.queueCount), items...) 
+}
+
+func (e *OtlpExporter) Start() {
+	if e.running {
+		log.Warningf("otlp exporter %d already running", e.index)
+		return
+	}
+	e.running = true
+	for i := 0; i < e.queueCount; i++ {
+		go e.queueProcess(i)
+	}
+	log.Infof("otlp exporter %d started %d queues", e.index, e.queueCount)
+}
+
+func (e *OtlpExporter) Close() {
+	e.running = false
+	log.Infof("otlp exporter %d stopping", e.index)
+}
+
+func (e *OtlpExporter) queueProcess(queueID int) {
+	var batchCount int
+	traces := ptrace.NewTraces()
+	items := make([]interface{}, QUEUE_BATCH_COUNT)
+
+	ctx := context.Background()
+	if len(e.config.ExtraHeaders) > 0 {
+		ctx = metadata.NewOutgoingContext(ctx, metadata.New(e.config.ExtraHeaders))
+	}
+	for e.running {
+		n := e.dataQueues.Gets(queue.HashKey(queueID), items)
+		for _, item := range items[:n] {
+			if item == nil { // a nil item is the flush indicator
+				if batchCount > 0 {
+					if err := e.grpcExport(ctx, queueID, ptraceotlp.NewExportRequestFromTraces(traces)); err == nil {
+						e.counter.SendCounter += int64(batchCount)
+					}
+					batchCount = 0
+					log.Debugf("%s", tracesToString(traces))
+					traces = ptrace.NewTraces()
+				}
+				continue
+			}
+			exportItem, ok := item.(common.ExportItem)
+			if !ok {
+				e.counter.DropCounter++
+				continue
+			}
+
+			dst, err := exportItem.EncodeTo(exporters_cfg.PROTOCOL_OTLP, e.universalTagsManager, e.config)
+			if err != nil {
+				if e.counter.DropCounter == 0 {
+					log.Warningf("otlp exporter encode failed. err: %s", err)
+				}
+				e.counter.DropCounter++
+				exportItem.Release()
+				continue
+			}
+			rsSlice := dst.(ptrace.ResourceSpansSlice)
+			rsSlice.MoveAndAppendTo(traces.ResourceSpans())
+
+			batchCount++
+			if batchCount >= e.config.BatchSize {
+				if err := e.grpcExport(ctx, queueID, ptraceotlp.NewExportRequestFromTraces(traces)); err == nil {
+					e.counter.SendCounter += int64(batchCount)
+				}
+				batchCount = 0
+				log.Debugf("%s", tracesToString(traces))
+				traces = ptrace.NewTraces()
+			}
+			exportItem.Release()
+		}
+	}
+}
+
+func (e *OtlpExporter) grpcExport(ctx context.Context, queueID int, req ptraceotlp.ExportRequest) error {
+	defer func() {
+		if r := recover(); r != nil {
+			log.Warningf("grpc otlp export error: %s", r)
+			if j, err := req.MarshalJSON(); err == nil {
+				log.Infof("otlp request: %s", string(j))
+			}
+		}
+	}()
+
+	now := time.Now()
+
+	if e.grpcExporters[queueID] == nil {
+		if err := e.newGrpcExporter(queueID); err != nil {
+			if e.counter.DropCounter == 0 {
+				log.Warningf("new grpc otlp exporter failed. err: %s", err)
+			}
+			e.counter.DropCounter++
+			return err
+		}
+	}
+	_, err := e.grpcExporters[queueID].Export(ctx, req)
+	if err != nil {
+		if e.counter.DropCounter == 0 {
+			log.Warningf("otlp exporter %d send grpc traces failed. failedCounter=%d, err: %s", e.index, e.grpcFailedCounters[queueID], err)
+		}
+		e.counter.DropCounter++
+		e.grpcExporters[queueID] = nil
+		return err
+	} else {
+		e.counter.SendBatchCounter++
+	}
+	e.counter.ExportUsedTimeNs += int64(time.Since(now))
+	return nil
+}
+
+func (e *OtlpExporter) getConn(queueID int) (*grpc.ClientConn, error) {
+	addrIndex := e.grpcFailedCounters[queueID] % len(e.config.Endpoints)
+	var options = []grpc.DialOption{grpc.WithInsecure(), grpc.WithTimeout(time.Minute)}
+	conn, err := grpc.Dial(e.config.Endpoints[addrIndex], options...)
+	if err != nil {
+		// on failure, advance to the next endpoint for the next attempt
+		e.grpcFailedCounters[queueID]++
+		return nil, fmt.Errorf("grpc dial %s failed, err: %s", e.config.Endpoints[addrIndex], err)
+	}
+	// rotate to the next endpoint for the next connection as well
+	e.grpcFailedCounters[queueID]++
+	log.Debugf("new grpc otlp exporter: %s", e.config.Endpoints[addrIndex])
+	return conn, nil
+}
+
+func (e *OtlpExporter) newGrpcExporter(queueID int) error {
+	if e.grpcConns[queueID] != nil {
+		e.grpcConns[queueID].Close()
+		e.grpcConns[queueID] = nil
+	}
+
+	conn, err := e.getConn(queueID)
+	if err != nil {
+		return err
+	}
+
+	e.grpcConns[queueID] = conn
+	e.grpcExporters[queueID] = ptraceotlp.NewGRPCClient(conn)
+	return nil
+}
+
+func (e *OtlpExporter) HandleSimpleCommand(op uint16, arg string) string {
+	return fmt.Sprintf("otlp exporter %d last 10s counter: %+v", e.index, e.lastCounter)
+}
+
+func tracesToString(traces ptrace.Traces) string {
+	sb := strings.Builder{}
+	for i := 0; i < traces.ResourceSpans().Len(); i++ {
+		resourceSpans := traces.ResourceSpans().At(i)
+		for j := 0; j < resourceSpans.ScopeSpans().Len(); j++ {
+			scopeSpans := resourceSpans.ScopeSpans().At(j)
+			for k := 0; k < scopeSpans.Spans().Len(); k++ {
+				span := scopeSpans.Spans().At(k)
+				sb.WriteString(fmt.Sprintf("Span Name: %s, ", span.Name()))
+				sb.WriteString(fmt.Sprintf("Trace ID: %s, ", traceIDToHex(span.TraceID())))
+				sb.WriteString(fmt.Sprintf("Span ID: %s, ", spanIDToHex(span.SpanID())))
+				sb.WriteString(fmt.Sprintf("Start Timestamp: %d, ", span.StartTimestamp()))
+				sb.WriteString(fmt.Sprintf("End Timestamp: %d, ", span.EndTimestamp()))
+				sb.WriteString(fmt.Sprintln("Attributes:"))
+				span.Attributes().Range(func(k string, v pcommon.Value) bool {
+					sb.WriteString(fmt.Sprintf("  %s: %v", k, v))
+					return true
+				})
+			}
+		}
+	}
+	return sb.String()
+}
+
+func traceIDToHex(id [16]byte) string {
+	var buf [16]byte
+	for i := 0; i < 16; i++ {
+		buf[i] = id[i]
+	}
+	return fmt.Sprintf("%x", buf)
+}
+
+func spanIDToHex(id [8]byte) string {
+	var buf [8]byte
+	for i := 0; i < 8; i++ {
+		buf[i] = id[i]
+	}
+	return fmt.Sprintf("%x", buf)
+}
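Not part of the patch — the failover pattern getConn uses, isolated so it runs standalone: the per-queue failure counter doubles as a rotating index over the configured endpoints, so every dial attempt (successful or not) moves on to the next address. Endpoint values are placeholders.

package main

import "fmt"

// pickEndpoint mirrors getConn's indexing: counter % len(endpoints),
// with the counter bumped after every dial attempt.
func pickEndpoint(endpoints []string, counter *int) string {
	addr := endpoints[*counter%len(endpoints)]
	*counter++ // rotate for the next attempt
	return addr
}

func main() {
	endpoints := []string{"127.0.0.1:4317", "1.1.1.1:4317"}
	counter := 0
	for i := 0; i < 4; i++ {
		fmt.Println(pickEndpoint(endpoints, &counter)) // alternates between the two endpoints
	}
}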
diff --git a/server/ingester/exporters/prometheus_exporter/exporter.go b/server/ingester/exporters/prometheus_exporter/exporter.go
new file mode 100644
index 000000000000..d5c94c53b2dc
--- /dev/null
+++ b/server/ingester/exporters/prometheus_exporter/exporter.go
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2024 Yunshan Networks
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package prometheus_exporter
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"time"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/golang/snappy"
+	logging "github.com/op/go-logging"
+	"github.com/prometheus/prometheus/prompb"
+	"golang.org/x/net/context"
+
+	ingester_common "github.com/deepflowio/deepflow/server/ingester/common"
+	"github.com/deepflowio/deepflow/server/ingester/exporters/common"
+	exporters_cfg "github.com/deepflowio/deepflow/server/ingester/exporters/config"
+	utag "github.com/deepflowio/deepflow/server/ingester/exporters/universal_tag"
+	"github.com/deepflowio/deepflow/server/ingester/ingesterctl"
+	"github.com/deepflowio/deepflow/server/libs/debug"
+	"github.com/deepflowio/deepflow/server/libs/pool"
+	"github.com/deepflowio/deepflow/server/libs/queue"
+	"github.com/deepflowio/deepflow/server/libs/stats"
+	"github.com/deepflowio/deepflow/server/libs/utils"
+)
+
+var log = logging.MustGetLogger("prometheus_exporter")
+
+const (
+	QUEUE_BATCH_COUNT = 1024
+)
+
+type PrometheusExporter struct {
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	index                 int
+	Addr                  string
+	dataQueues            queue.FixedMultiQueue
+	queueCount            int
+	requestFailedCounters []int
+
+	universalTagsManager *utag.UniversalTagsManager
+	config               *exporters_cfg.ExporterCfg
+	counter              *Counter
+	lastCounter          Counter
+	running              bool
+
+	utils.Closable
+}
+
+type Counter struct {
+	RecvCounter      int64 `statsd:"recv-count"`
+	SendCounter      int64 `statsd:"send-count"`
+	SendBatchCounter int64 `statsd:"send-batch-count"`
+	DropCounter      int64 `statsd:"drop-count"`
+	DropBatchCounter int64 `statsd:"drop-batch-count"`
+	ExportUsedTimeNs int64 `statsd:"export-used-time-ns"`
+}
+
+func (e *PrometheusExporter) GetCounter() interface{} {
+	var counter Counter
+	counter, *e.counter = *e.counter, Counter{}
+	e.lastCounter = counter
+	return &counter
+}
+
+func NewPrometheusExporter(index int, config *exporters_cfg.ExporterCfg, universalTagsManager *utag.UniversalTagsManager) *PrometheusExporter {
+	ctx, cancel := context.WithCancel(context.Background())
+	dataQueues := queue.NewOverwriteQueues(
+		fmt.Sprintf("prometheus_exporter_%d", index), queue.HashKey(config.QueueCount), config.QueueSize,
+		queue.OptionFlushIndicator(time.Second),
+		queue.OptionRelease(func(p interface{}) { p.(common.ExportItem).Release() }),
+		ingester_common.QUEUE_STATS_MODULE_INGESTER)
+
+	exporter := &PrometheusExporter{
+		index:                 index,
+		dataQueues:            dataQueues,
+		queueCount:            config.QueueCount,
+		requestFailedCounters: make([]int, config.QueueCount),
+		universalTagsManager:  universalTagsManager,
+		config:                config,
+		counter:               &Counter{},
+		ctx:                   ctx,
+		cancel:                cancel,
+	}
+	debug.ServerRegisterSimple(ingesterctl.CMD_PROMETHEUS_EXPORTER, exporter)
+	ingester_common.RegisterCountableForIngester("exporter", exporter, stats.OptionStatTags{
+		"type": "prometheus", "index": strconv.Itoa(index)})
+	log.Infof("prometheus exporter %d created", index)
+	return exporter
+}
+
+func (e *PrometheusExporter) Put(items ...interface{}) {
+	e.counter.RecvCounter++
+	e.dataQueues.Put(queue.HashKey(int(e.counter.RecvCounter)%e.queueCount), items...)
+}
+
+func (e *PrometheusExporter) Start() {
+	if e.running {
+		log.Warningf("prometheus exporter %d already running", e.index)
+		return
+	}
+	e.running = true
+	for i := 0; i < e.queueCount; i++ {
+		go e.queueProcess(i)
+	}
+	log.Infof("prometheus exporter %d started %d queues", e.index, e.queueCount)
+}
+
+func (e *PrometheusExporter) Close() {
+	e.running = false
+	e.Closable.Close()
+	e.cancel()
+	log.Infof("prometheus exporter %d stopping", e.index)
+}
+
+func (e *PrometheusExporter) queueProcess(queueID int) {
+	items := make([]interface{}, QUEUE_BATCH_COUNT)
+	batches := make([]prompb.TimeSeries, 0, e.config.BatchSize)
+
+	doReq := func() {
+		batchCount := len(batches)
+		if batchCount == 0 {
+			return
+		}
+		now := time.Now()
+		if err := e.sendRequest(queueID, batches); err != nil {
+			if e.counter.DropCounter == 0 {
+				log.Warningf("failed to send prometheus remote write request, requestFailedCounter=%d, err: %v", e.requestFailedCounters[queueID], err)
+			}
+			e.counter.DropCounter += int64(batchCount)
+			e.counter.DropBatchCounter++
+		} else {
+			e.counter.SendCounter += int64(batchCount)
+			e.counter.SendBatchCounter++
+		}
+		e.counter.ExportUsedTimeNs += int64(time.Since(now))
+		batches = batches[:0]
+	}
+
+	for e.running {
+		n := e.dataQueues.Gets(queue.HashKey(queueID), items)
+		for i, item := range items[:n] {
+			if item == nil { // a nil item is the flush indicator
+				doReq()
+				continue
+			}
+			exportItem, ok := item.(common.ExportItem)
+			if !ok {
+				e.counter.DropCounter++
+				continue
+			}
+
+			ts, err := exportItem.EncodeTo(exporters_cfg.PROTOCOL_PROMETHEUS, e.universalTagsManager, e.config)
+			if err != nil {
+				if e.counter.DropCounter == 0 {
+					log.Warningf("failed to encode prometheus remote write request, err: %v", err)
+				}
+				e.counter.DropCounter++
+				exportItem.Release()
+				continue
+			}
+			timeSeries := ts.([]prompb.TimeSeries)
+			if i%100 == 0 { // sample one entry per 100 for debugging
+				log.Debugf("prometheus timeSeries %+v", timeSeries)
+			}
+			batches = append(batches, timeSeries...)
+			if len(batches) >= e.config.BatchSize {
+				doReq()
+			}
+			exportItem.Release()
+		}
+	}
+}
+
+func (e *PrometheusExporter) HandleSimpleCommand(op uint16, arg string) string {
+	return fmt.Sprintf("prometheus exporter %d last 10s counter: %+v", e.index, e.lastCounter)
+}
+
+func (e *PrometheusExporter) getEndpoint(queueID int) string {
+	l := len(e.config.RandomEndpoints)
+	return e.config.RandomEndpoints[e.requestFailedCounters[queueID]%l]
+}
+
+func (e *PrometheusExporter) sendRequest(queueID int, batches []prompb.TimeSeries) error {
+	wr := &prompb.WriteRequest{Timeseries: batches}
+	data, err := proto.Marshal(wr)
+	if err != nil {
+		return err
+	}
+	buf := make([]byte, snappy.MaxEncodedLen(len(data)))
+	compressedData := snappy.Encode(buf, data)
+
+	endpoint := e.getEndpoint(queueID)
+	req, err := http.NewRequestWithContext(e.ctx, "POST", endpoint, bytes.NewReader(compressedData))
+	if err != nil {
+		e.requestFailedCounters[queueID]++
+		return err
+	}
+
+	// Add necessary headers specified by:
+	// https://cortexmetrics.io/docs/apis/#remote-api
+	req.Header.Add("Content-Encoding", "snappy")
+	req.Header.Set("Content-Type", "application/x-protobuf")
+	req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
+
+	// inject extra headers
+	for k, v := range e.config.ExtraHeaders {
+		req.Header.Set(k, v)
+	}
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		e.requestFailedCounters[queueID]++
+		return err
+	}
+	defer resp.Body.Close()
+
+	// 5xx errors are recoverable and the writer should retry.
+	// Reference for different behavior according to status code:
+	// https://github.com/prometheus/prometheus/pull/2552/files#diff-ae8db9d16d8057358e49d694522e7186
+	body, err := io.ReadAll(io.LimitReader(resp.Body, 256))
+	if resp.StatusCode >= 500 && resp.StatusCode < 600 {
+		e.requestFailedCounters[queueID]++
+		return fmt.Errorf("remote write returned HTTP status %v; err = %w: %s", resp.Status, err, body)
+	}
+
+	return nil
+}
+
+var prompbTimeSeriesPool = pool.NewLockFreePool(func() interface{} {
+	return &prompb.TimeSeries{
+		Samples: make([]prompb.Sample, 1),
+	}
+})
+
+func AcquirePrompbTimeSeries() *prompb.TimeSeries {
+	return prompbTimeSeriesPool.Get().(*prompb.TimeSeries)
+}
+
+func ReleasePrompbTimeSeries(t *prompb.TimeSeries) {
+	if t == nil {
+		return
+	}
+	t.Labels = t.Labels[:0]
+	prompbTimeSeriesPool.Put(t)
+}
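Not part of the patch — the intended acquire/fill/release cycle for the pooled TimeSeries above, sketched against a plain sync.Pool so it runs standalone (the real code uses the repo's lock-free pool). The label and sample values are invented.

package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/prometheus/prompb"
)

// stand-in for the repo's pool.NewLockFreePool
var tsPool = sync.Pool{New: func() interface{} {
	return &prompb.TimeSeries{Samples: make([]prompb.Sample, 1)}
}}

func main() {
	ts := tsPool.Get().(*prompb.TimeSeries)
	ts.Labels = append(ts.Labels, prompb.Label{Name: "__name__", Value: "flow_metrics_network_rtt"})
	ts.Samples[0] = prompb.Sample{Value: 42, Timestamp: 1700000000000}
	fmt.Println(ts.String())

	// release: reset Labels (Samples keeps its fixed length of 1) and return to the pool
	ts.Labels = ts.Labels[:0]
	tsPool.Put(ts)
}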
diff --git a/server/ingester/flow_log/exporters/universal_tag/universal_tag.go b/server/ingester/exporters/universal_tag/universal_tag.go
similarity index 57%
rename from server/ingester/flow_log/exporters/universal_tag/universal_tag.go
rename to server/ingester/exporters/universal_tag/universal_tag.go
index 09a871ce3ea5..2868b9c17ab0
--- a/server/ingester/flow_log/exporters/universal_tag/universal_tag.go
+++ b/server/ingester/exporters/universal_tag/universal_tag.go
@@ -27,7 +27,6 @@ import (
 	"github.com/deepflowio/deepflow/message/trident"
 	"github.com/deepflowio/deepflow/server/ingester/config"
-	"github.com/deepflowio/deepflow/server/ingester/flow_log/log_data"
 	"github.com/deepflowio/deepflow/server/ingester/ingesterctl"
 	"github.com/deepflowio/deepflow/server/libs/debug"
 	"github.com/deepflowio/deepflow/server/libs/grpc"
@@ -36,38 +35,97 @@ import (
 
 var log = logging.MustGetLogger("universal_tag")
 
-type UniversalTags struct {
-	Region       string
-	AZ           string
-	Host         string
-	L3DeviceType string
-	L3Device     string
-	PodNode      string
-	PodNS        string
-	PodGroup     string
-	Pod          string
-	PodCluster   string
-	L3Epc        string
-	Subnet       string
-	Service      string
-	GProcess     string
-	Vtap         string
-
-	CHost      string
-	Router     string
-	DhcpGW     string
-	PodService string
-	Redis      string
-	RDS        string
-	LB         string
-	NatGW      string
-
-	TapPortName string
-
-	AutoInstanceType string
-	AutoInstance     string
-	AutoServiceType  string
-	AutoService      string
+const (
+	Unknown = iota
+	Region
+	AZ
+	Host
+	L3DeviceType
+	L3Device
+	PodNode
+	PodNS
+	PodGroup
+	Pod
+	PodCluster
+	L3Epc
+	Subnet
+	Service
+	GProcess
+	Vtap
+
+	CHost
+	Router
+	DhcpGW
+	PodService
+	Redis
+	RDS
+	LB
+	NatGW
+
+	// TapPortName string
+
+	AutoInstanceType
+	AutoInstance
+	AutoServiceType
+	AutoService
+
+	MAX_TAG_ID
+)
+
+var idStrings = []string{
+	Unknown:      "unknown",
+	Region:       "region",
+	AZ:           "az",
+	Host:         "host",
+	L3DeviceType: "l3_device_type",
+	L3Device:     "l3_device",
+	PodNode:      "pod_node",
+	PodNS:        "pod_ns",
+	PodGroup:     "pod_group",
+	Pod:          "pod",
+	PodCluster:   "pod_cluster",
+	L3Epc:        "l3_epc",
+	Subnet:       "subnet",
+	Service:      "service",
+	GProcess:     "gprocess",
+	Vtap:         "agent",
+
+	CHost:      "chost",
+	Router:     "router",
+	DhcpGW:     "dhcpgw",
+	PodService: "pod_service",
+	Redis:      "redis",
+	RDS:        "rds",
+	LB:         "lb",
+	NatGW:      "natgw",
+
+	// TapPortName string
+
+	AutoInstanceType: "auto_instance_type",
+	AutoInstance:     "auto_instance",
+	AutoServiceType:  "auto_service_type",
+	AutoService:      "auto_service",
+}
+
+type UniversalTags [MAX_TAG_ID]string
+
+func (u UniversalTags) GetTagValue(id uint8) string {
+	return u[id]
+}
+
+func StringToUniversalTagID(str string) uint8 {
+	for i, name := range idStrings {
+		if name == str {
+			return 
uint8(i) + } + l := len(str) + if ((strings.HasSuffix(str, "_id_0") || strings.HasSuffix(str, "_id_1")) && str[:l-5] == name) || + ((strings.HasSuffix(str, "_0") || strings.HasSuffix(str, "_1")) && str[:l-2] == name) || + (strings.HasSuffix(str, "_id") && str[:l-3] == name) { + return uint8(i) + } + } + return Unknown } type DeviceType uint8 @@ -133,77 +191,113 @@ type UniversalTagMaps struct { vtapMap map[uint16]string } -func (u *UniversalTagsManager) QueryUniversalTags(l7FlowLog *log_data.L7FlowLog) (*UniversalTags, *UniversalTags) { - tagMaps := u.universalTagMaps - tags0, tags1 := &UniversalTags{ - Region: tagMaps.regionMap[l7FlowLog.RegionID0], - AZ: tagMaps.azMap[l7FlowLog.AZID0], - Host: tagMaps.deviceMap[uint64(TYPE_HOST)<<32|uint64(l7FlowLog.HostID0)], - L3DeviceType: DeviceType(l7FlowLog.L3DeviceType0).String(), - L3Device: tagMaps.deviceMap[uint64(l7FlowLog.L3DeviceType0)<<32|uint64(l7FlowLog.L3DeviceID0)], - PodNode: tagMaps.podNodeMap[l7FlowLog.PodNodeID0], - PodNS: tagMaps.podNsMap[l7FlowLog.PodNSID0], - PodGroup: tagMaps.podGroupMap[l7FlowLog.PodGroupID0], - Pod: tagMaps.podMap[l7FlowLog.PodID0], - PodCluster: tagMaps.podClusterMap[l7FlowLog.PodClusterID0], - L3Epc: tagMaps.l3EpcMap[uint32(l7FlowLog.L3EpcID0)], - Subnet: tagMaps.subnetMap[l7FlowLog.SubnetID0], - Service: tagMaps.deviceMap[uint64(TYPE_SERVICE)<<32|uint64(l7FlowLog.ServiceID0)], - GProcess: tagMaps.gprocessMap[l7FlowLog.GPID0], - Vtap: tagMaps.vtapMap[l7FlowLog.VtapID], - }, &UniversalTags{ - Region: tagMaps.regionMap[l7FlowLog.RegionID1], - AZ: tagMaps.azMap[l7FlowLog.AZID1], - Host: tagMaps.deviceMap[uint64(TYPE_HOST)<<32|uint64(l7FlowLog.HostID1)], - L3DeviceType: DeviceType(l7FlowLog.L3DeviceType1).String(), - L3Device: tagMaps.deviceMap[uint64(l7FlowLog.L3DeviceType1)<<32|uint64(l7FlowLog.L3DeviceID1)], - PodNode: tagMaps.podNodeMap[l7FlowLog.PodNodeID1], - PodNS: tagMaps.podNsMap[l7FlowLog.PodNSID1], - PodGroup: tagMaps.podGroupMap[l7FlowLog.PodGroupID1], - Pod: tagMaps.podMap[l7FlowLog.PodID1], - PodCluster: tagMaps.podClusterMap[l7FlowLog.PodClusterID1], - L3Epc: tagMaps.l3EpcMap[uint32(l7FlowLog.L3EpcID1)], - Subnet: tagMaps.subnetMap[l7FlowLog.SubnetID1], - Service: tagMaps.deviceMap[uint64(TYPE_SERVICE)<<32|uint64(l7FlowLog.ServiceID1)], - GProcess: tagMaps.gprocessMap[l7FlowLog.GPID1], - Vtap: tagMaps.vtapMap[l7FlowLog.VtapID], - } +func (u *UniversalTagsManager) QueryRegion(regionID uint16) string { + return u.universalTagMaps.regionMap[regionID] +} + +func (u *UniversalTagsManager) QueryAZ(azID uint16) string { + return u.universalTagMaps.azMap[azID] +} + +func (u *UniversalTagsManager) QueryHost(hostID uint16) string { + return u.universalTagMaps.deviceMap[uint64(TYPE_HOST)<<32|uint64(hostID)] +} - l3Device0 := tagMaps.deviceMap[uint64(l7FlowLog.L3DeviceType0)<<32|uint64(l7FlowLog.L3DeviceID0)] - fillDevice(tags0, DeviceType(l7FlowLog.L3DeviceType0), l3Device0) +func (u *UniversalTagsManager) QueryL3Device(l3DeviceType uint8, l3DeviceID uint32) (string, string) { + return DeviceType(l3DeviceType).String(), u.universalTagMaps.deviceMap[uint64(l3DeviceType)<<32|uint64(l3DeviceID)] +} + +func (u *UniversalTagsManager) QueryPodNode(podNodeID uint32) string { + return u.universalTagMaps.podNodeMap[podNodeID] +} - l3Device1 := tagMaps.deviceMap[uint64(l7FlowLog.L3DeviceType1)<<32|uint64(l7FlowLog.L3DeviceID1)] - fillDevice(tags1, DeviceType(l7FlowLog.L3DeviceType1), l3Device1) +func (u *UniversalTagsManager) QueryPodNs(podNsID uint16) string { + return u.universalTagMaps.podNsMap[podNsID] +} - 
tags0.AutoServiceType = DeviceType(l7FlowLog.AutoServiceType0).String() - tags0.AutoService = u.getAuto(DeviceType(l7FlowLog.AutoServiceType0), l7FlowLog.AutoServiceID0, l7FlowLog.IsIPv4, l7FlowLog.IP40, l7FlowLog.IP60) - tags0.AutoInstanceType = DeviceType(l7FlowLog.AutoInstanceType0).String() - tags0.AutoInstance = u.getAuto(DeviceType(l7FlowLog.AutoInstanceType0), l7FlowLog.AutoInstanceID0, l7FlowLog.IsIPv4, l7FlowLog.IP40, l7FlowLog.IP60) +func (u *UniversalTagsManager) QueryPodGroup(podGroupID uint32) string { + return u.universalTagMaps.podGroupMap[podGroupID] +} - tags1.AutoServiceType = DeviceType(l7FlowLog.AutoServiceType1).String() - tags1.AutoService = u.getAuto(DeviceType(l7FlowLog.AutoServiceType1), l7FlowLog.AutoServiceID1, l7FlowLog.IsIPv4, l7FlowLog.IP41, l7FlowLog.IP61) - tags1.AutoInstanceType = DeviceType(l7FlowLog.AutoInstanceType1).String() - tags1.AutoInstance = u.getAuto(DeviceType(l7FlowLog.AutoInstanceType1), l7FlowLog.AutoInstanceID1, l7FlowLog.IsIPv4, l7FlowLog.IP41, l7FlowLog.IP61) +func (u *UniversalTagsManager) QueryPod(podID uint32) string { + return u.universalTagMaps.podMap[podID] +} - return tags0, tags1 +func (u *UniversalTagsManager) QueryPodCluster(podClusterID uint16) string { + return u.universalTagMaps.podClusterMap[podClusterID] +} + +func (u *UniversalTagsManager) QueryEpc(l3EpcID int32) string { + return u.universalTagMaps.l3EpcMap[uint32(l3EpcID)] +} + +func (u *UniversalTagsManager) QuerySubnet(subnetID uint16) string { + return u.universalTagMaps.subnetMap[subnetID] +} + +func (u *UniversalTagsManager) QueryGProcess(gprocessID uint32) string { + return u.universalTagMaps.gprocessMap[gprocessID] +} + +func (u *UniversalTagsManager) QueryVtap(agentID uint16) string { + return u.universalTagMaps.vtapMap[agentID] +} + +func (u *UniversalTagsManager) QueryAuto(autoType uint8, autoID uint32, isIPv4 bool, ip4 uint32, ip6 net.IP) (string, string) { + return DeviceType(autoType).String(), u.getAuto(DeviceType(autoType), autoID, isIPv4, ip4, ip6) +} + +func (u *UniversalTagsManager) QueryUniversalTags( + regionID, azID, hostID, podNsID, podClusterID, subnetID, agentID uint16, + l3DeviceType, autoServiceType, autoInstanceType uint8, + l3DeviceID, autoServiceID, autoInstanceID, podNodeID, podGroupID, podID, l3EpcID, gprocessID, serviceID uint32, + isIPv4 bool, ip4 uint32, ip6 net.IP, +) *UniversalTags { + tagMaps := u.universalTagMaps + tags := &UniversalTags{ + Region: tagMaps.regionMap[regionID], + AZ: tagMaps.azMap[azID], + Host: tagMaps.deviceMap[uint64(TYPE_HOST)<<32|uint64(hostID)], + L3DeviceType: DeviceType(l3DeviceType).String(), + L3Device: tagMaps.deviceMap[uint64(l3DeviceType)<<32|uint64(l3DeviceID)], + PodNode: tagMaps.podNodeMap[podNodeID], + PodNS: tagMaps.podNsMap[podNsID], + PodGroup: tagMaps.podGroupMap[podGroupID], + Pod: tagMaps.podMap[podID], + PodCluster: tagMaps.podClusterMap[podClusterID], + L3Epc: tagMaps.l3EpcMap[uint32(l3EpcID)], + Subnet: tagMaps.subnetMap[subnetID], + Service: tagMaps.deviceMap[uint64(TYPE_SERVICE)<<32|uint64(serviceID)], + GProcess: tagMaps.gprocessMap[gprocessID], + Vtap: tagMaps.vtapMap[agentID], + } + + fillDevice(tags, DeviceType(l3DeviceType), tags[L3Device]) + + tags[AutoServiceType] = DeviceType(autoServiceType).String() + tags[AutoService] = u.getAuto(DeviceType(autoServiceType), autoServiceID, isIPv4, ip4, ip6) + tags[AutoInstanceType] = DeviceType(autoInstanceType).String() + tags[AutoInstance] = u.getAuto(DeviceType(autoInstanceType), autoInstanceID, isIPv4, ip4, ip6) + + return tags } func 
fillDevice(tags *UniversalTags, deviceType DeviceType, device string) { switch deviceType { case TYPE_VM: - tags.CHost = device + tags[CHost] = device case TYPE_VROUTER: - tags.Router = device + tags[Router] = device case TYPE_DHCP_GW: - tags.DhcpGW = device + tags[DhcpGW] = device case TYPE_POD_SERVICE: - tags.PodService = device + tags[PodService] = device case TYPE_REDIS_INSTANCE: - tags.Redis = device + tags[Redis] = device case TYPE_RDS_INSTANCE: - tags.RDS = device + tags[RDS] = device case TYPE_LB: - tags.LB = device + tags[LB] = device } } @@ -226,29 +320,34 @@ type UniversalTagsManager struct { universalTagMaps *UniversalTagMaps tapPortNameMap map[uint64]string - customK8sLabelsRegexp string - k8sLabelsRegexp *regexp.Regexp + k8sLabelFields []string + k8sLabelRegexps []*regexp.Regexp grpcSession *grpc.GrpcSession versionUniversalTagMaps uint32 } -func NewUniversalTagsManager(customK8sLabelsRegexp string, baseCfg *config.Config) *UniversalTagsManager { +func NewUniversalTagsManager(k8sLabelConfig []string, baseCfg *config.Config) *UniversalTagsManager { universalTagMaps := &UniversalTagMaps{} - var k8sLabelsRegexp *regexp.Regexp - if customK8sLabelsRegexp != "" { - var err error - k8sLabelsRegexp, err = regexp.Compile(customK8sLabelsRegexp) - if err != nil { - log.Warningf("exporter compile k8s label regexp pattern failed: %s", err) + var k8sLabelRegexps []*regexp.Regexp + var k8sLabelFields []string + for _, k8sLabel := range k8sLabelConfig { + if strings.HasPrefix(k8sLabel, "~") { + if k8sLabelRegexp, err := regexp.Compile(k8sLabel[1:]); err == nil { + k8sLabelRegexps = append(k8sLabelRegexps, k8sLabelRegexp) + } else { + log.Warningf("exporter compile k8s label regexp pattern failed: %s", err) + } + } else { + k8sLabelFields = append(k8sLabelFields, k8sLabel) } } m := &UniversalTagsManager{ - customK8sLabelsRegexp: customK8sLabelsRegexp, - universalTagMaps: universalTagMaps, - tapPortNameMap: make(map[uint64]string), - k8sLabelsRegexp: k8sLabelsRegexp, - grpcSession: &grpc.GrpcSession{}, + k8sLabelFields: k8sLabelFields, + k8sLabelRegexps: k8sLabelRegexps, + universalTagMaps: universalTagMaps, + tapPortNameMap: make(map[uint64]string), + grpcSession: &grpc.GrpcSession{}, } runOnce := func() { @@ -358,13 +457,20 @@ func (u *UniversalTagsManager) GetUniversalTagMaps(response *trident.UniversalTa } func (u *UniversalTagsManager) isK8sLabelExport(name string) bool { - // if not configured, all are not exported - if len(u.customK8sLabelsRegexp) == 0 { - return false + for _, field := range u.k8sLabelFields { + // export `k8s_label` all + if field == "k8s_label" { + return true + } + if field == name { + return true + } } - if u.k8sLabelsRegexp != nil && u.k8sLabelsRegexp.MatchString(name) { - return true + for _, reg := range u.k8sLabelRegexps { + if reg != nil && reg.MatchString(name) { + return true + } } return false @@ -384,6 +490,6 @@ func (u *UniversalTagsManager) HandleSimpleCommand(operate uint16, arg string) s sb.WriteString(fmt.Sprintf("l3EpcMap: %+v\n", u.universalTagMaps.l3EpcMap)) sb.WriteString(fmt.Sprintf("subnetMap: %+v\n", u.universalTagMaps.subnetMap)) sb.WriteString(fmt.Sprintf("gprocessMap: %+v\n", u.universalTagMaps.gprocessMap)) - sb.WriteString(fmt.Sprintf("vtapMap: %+v\n", u.universalTagMaps.vtapMap)) + sb.WriteString(fmt.Sprintf("agentMap: %+v\n", u.universalTagMaps.vtapMap)) return sb.String() } diff --git a/server/ingester/exporters/utils/utils.go b/server/ingester/exporters/utils/utils.go new file mode 100644 index 000000000000..06ab7d0f9a35 --- /dev/null 
+++ b/server/ingester/exporters/utils/utils.go
@@ -0,0 +1 @@
+package utils
diff --git a/server/ingester/flow_log/config/config.go b/server/ingester/flow_log/config/config.go
index d002f1da51a8..cc5f0dafd51b 100644
--- a/server/ingester/flow_log/config/config.go
+++ b/server/ingester/flow_log/config/config.go
@@ -24,7 +24,6 @@ import (
 	yaml "gopkg.in/yaml.v2"
 
 	"github.com/deepflowio/deepflow/server/ingester/config"
-	exporters_cfg "github.com/deepflowio/deepflow/server/ingester/flow_log/exporters/config"
 )
 
 var log = logging.MustGetLogger("flow_log.config")
@@ -46,19 +45,14 @@ type FlowLogTTL struct {
 }
 
 type Config struct {
 	Base *config.Config
-	CKWriterConfig    config.CKWriterConfig      `yaml:"flowlog-ck-writer"`
-	Throttle          int                        `yaml:"throttle"`
-	ThrottleBucket    int                        `yaml:"throttle-bucket"`
-	L4Throttle        int                        `yaml:"l4-throttle"`
-	L7Throttle        int                        `yaml:"l7-throttle"`
-	FlowLogTTL        FlowLogTTL                 `yaml:"flow-log-ttl-hour"`
-	DecoderQueueCount int                        `yaml:"flow-log-decoder-queue-count"`
-	DecoderQueueSize  int                        `yaml:"flow-log-decoder-queue-size"`
-	ExportersCfg      exporters_cfg.ExportersCfg `yaml:"exporters"`
-
-	// OTLPExporter is moved inside ExportersCfg hence deprecated.
-	// Preserved for backward compatibility ONLY.
-	OtlpDeprecated exporters_cfg.OtlpExporterConfigDeprecated `yaml:"otlp-exporter"`
+	CKWriterConfig    config.CKWriterConfig `yaml:"flowlog-ck-writer"`
+	Throttle          int                   `yaml:"throttle"`
+	ThrottleBucket    int                   `yaml:"throttle-bucket"`
+	L4Throttle        int                   `yaml:"l4-throttle"`
+	L7Throttle        int                   `yaml:"l7-throttle"`
+	FlowLogTTL        FlowLogTTL            `yaml:"flow-log-ttl-hour"`
+	DecoderQueueCount int                   `yaml:"flow-log-decoder-queue-count"`
+	DecoderQueueSize  int                   `yaml:"flow-log-decoder-queue-size"`
 }
 
 type FlowLogConfig struct {
@@ -66,37 +60,6 @@ type FlowLogConfig struct {
 }
 
 func (c *Config) Validate() error {
-	// For backward compatibility reason, we must map some config
-	// to the latest field it belongs to.
-
-	// Mapping "ingester.otlp-exporter" to "ingester.exporters.otlp-exporter" with default name.
-	// This won't work when "enabled: false" is set in otlp-exporter
-	if c.OtlpDeprecated.Enabled {
-		log.Warning("config ingester.otlp-exporter is deprecated. mapping to ingester.exporters.otlp-exporter.")
-		c.ExportersCfg = exporters_cfg.ExportersCfg{
-			Enabled: true,
-			OverridableCfg: exporters_cfg.OverridableCfg{
-				ExportDatas:                 c.OtlpDeprecated.ExportDatas,
-				ExportDataTypes:             c.OtlpDeprecated.ExportDataTypes,
-				ExportCustomK8sLabelsRegexp: c.OtlpDeprecated.ExportCustomK8sLabelsRegexp,
-				ExportOnlyWithTraceID:       &c.OtlpDeprecated.ExportOnlyWithTraceID,
-			},
-			OtlpExporterCfgs: []exporters_cfg.OtlpExporterConfig{
-				{
-					Enabled:          true,
-					Addr:             c.OtlpDeprecated.Addr,
-					QueueCount:       c.OtlpDeprecated.QueueCount,
-					QueueSize:        c.OtlpDeprecated.QueueSize,
-					ExportBatchCount: c.OtlpDeprecated.ExportBatchCount,
-					GrpcHeaders:      c.OtlpDeprecated.GrpcHeaders,
-				},
-			},
-		}
-	}
-	if err := c.ExportersCfg.Validate(); err != nil {
-		return err
-	}
-
-	// Begin validation.
if c.DecoderQueueCount == 0 { c.DecoderQueueCount = DefaultDecoderQueueCount @@ -114,12 +77,6 @@ func (c *Config) Validate() error { c.FlowLogTTL.L4Packet = DefaultFlowLogTTL } - if c.ExportersCfg.Enabled { - if err := c.ExportersCfg.Validate(); err != nil { - return err - } - } - return nil } @@ -133,8 +90,6 @@ func Load(base *config.Config, path string) *Config { DecoderQueueSize: DefaultDecoderQueueSize, CKWriterConfig: config.CKWriterConfig{QueueCount: 1, QueueSize: 1000000, BatchSize: 512000, FlushTimeout: 10}, FlowLogTTL: FlowLogTTL{DefaultFlowLogTTL, DefaultFlowLogTTL, DefaultFlowLogTTL}, - ExportersCfg: exporters_cfg.NewDefaultExportersCfg(), - OtlpDeprecated: exporters_cfg.NewOtlpDefaultConfigDeprecated(), }, } if _, err := os.Stat(path); os.IsNotExist(err) { diff --git a/server/ingester/flow_log/decoder/decoder.go b/server/ingester/flow_log/decoder/decoder.go index 38256ec78db6..d70df16eb2d6 100644 --- a/server/ingester/flow_log/decoder/decoder.go +++ b/server/ingester/flow_log/decoder/decoder.go @@ -28,8 +28,10 @@ import ( v1 "go.opentelemetry.io/proto/otlp/trace/v1" "github.com/deepflowio/deepflow/server/ingester/common" + "github.com/deepflowio/deepflow/server/ingester/exporters" + exportcommon "github.com/deepflowio/deepflow/server/ingester/exporters/common" + exportconfig "github.com/deepflowio/deepflow/server/ingester/exporters/config" "github.com/deepflowio/deepflow/server/ingester/flow_log/config" - "github.com/deepflowio/deepflow/server/ingester/flow_log/exporters" "github.com/deepflowio/deepflow/server/ingester/flow_log/log_data" "github.com/deepflowio/deepflow/server/ingester/flow_log/throttler" "github.com/deepflowio/deepflow/server/ingester/flow_tag" @@ -75,6 +77,7 @@ type Counter struct { type Decoder struct { index int msgType datatype.MessageType + dataSourceID uint32 platformData *grpc.PlatformInfoTable inQueue queue.QueueReader throttler *throttler.ThrottlingQueue @@ -102,6 +105,7 @@ func NewDecoder( return &Decoder{ index: index, msgType: msgType, + dataSourceID: exportconfig.FlowLogMessageToDataSourceID(msgType), platformData: platformData, inQueue: inQueue, throttler: throttler, @@ -287,17 +291,22 @@ func (d *Decoder) sendFlow(flow *pb.TaggedFlow) { l := log_data.TaggedFlowToL4FlowLog(flow, d.platformData) if l.HitPcapPolicy() { + d.export(l) d.throttler.SendWithoutThrottling(l) } else { + l.AddReferenceCount() if !d.throttler.SendWithThrottling(l) { d.counter.DropCount++ + } else { + d.export(l) } + l.Release() } } -func (d *Decoder) export(l *log_data.L7FlowLog) { +func (d *Decoder) export(l exportcommon.ExportItem) { if d.exporters != nil { - d.exporters.Put(l, d.index) + d.exporters.Put(d.dataSourceID, d.index, l) } } diff --git a/server/ingester/flow_log/exporters/config/config.go b/server/ingester/flow_log/exporters/config/config.go deleted file mode 100644 index 2b5cacc2d927..000000000000 --- a/server/ingester/flow_log/exporters/config/config.go +++ /dev/null @@ -1,145 +0,0 @@ -package config - -import ( - logging "github.com/op/go-logging" - - "github.com/deepflowio/deepflow/server/libs/datatype" -) - -var log = logging.MustGetLogger("exporters_config") - -type OverridableCfg struct { - ExportDatas []string `yaml:"export-datas"` - ExportDataBits uint32 // generate from 'ExportDatas' - ExportDataTypes []string `yaml:"export-data-types"` - ExportDataTypeBits uint32 // generate from 'ExportDataTypes' - ExportCustomK8sLabelsRegexp string `yaml:"export-custom-k8s-labels-regexp"` - ExportOnlyWithTraceID *bool `yaml:"export-only-with-traceid"` -} - -// 
ExporterCfg holds configs of different exporters. -type ExportersCfg struct { - Enabled bool `yaml:"enabled"` - - // global config, could be overridden by same fields under each exporter. - OverridableCfg `yaml:",inline"` - - // OtlpExporter config for OTLP exporters - OtlpExporterCfgs []OtlpExporterConfig `yaml:"otlp-exporters"` - - // other exporter configs ... -} - -func (ec *ExportersCfg) Validate() error { - for i := range ec.OtlpExporterCfgs { - if err := ec.OtlpExporterCfgs[i].Validate(ec.OverridableCfg); err != nil { - return err - } - } - return nil -} - -var DefaultOtlpExportDatas = []string{"cbpf-net-span", "ebpf-sys-span"} -var DefaultOtlpExportDataTypes = []string{"service_info", "tracing_info", "network_layer", "flow_info", "transport_layer", "application_layer", "metrics"} - -func NewDefaultExportersCfg() ExportersCfg { - return ExportersCfg{ - Enabled: false, - OverridableCfg: OverridableCfg{ - ExportDatas: DefaultOtlpExportDatas, - ExportDataTypes: DefaultOtlpExportDataTypes, - }, - OtlpExporterCfgs: []OtlpExporterConfig{NewOtlpDefaultConfig()}, - } -} - -const ( - UNKNOWN_DATA = 0 - CBPF_NET_SPAN = uint32(1 << datatype.SIGNAL_SOURCE_PACKET) - EBPF_SYS_SPAN = uint32(1 << datatype.SIGNAL_SOURCE_EBPF) - OTEL_APP_SPAN = uint32(1 << datatype.SIGNAL_SOURCE_OTEL) -) - -var exportedDataStringMap = map[string]uint32{ - "cbpf-net-span": CBPF_NET_SPAN, - "ebpf-sys-span": EBPF_SYS_SPAN, - "otel-app-span": OTEL_APP_SPAN, -} - -func bitsToString(bits uint32, strMap map[string]uint32) string { - ret := "" - for k, v := range strMap { - if bits&v != 0 { - if len(ret) == 0 { - ret = k - } else { - ret = ret + "," + k - } - } - } - return ret -} - -func ExportedDataBitsToString(bits uint32) string { - return bitsToString(bits, exportedDataStringMap) -} - -func StringToExportedData(str string) uint32 { - t, ok := exportedDataStringMap[str] - if !ok { - log.Warningf("unknown exporter data: %s", str) - return UNKNOWN_DATA - } - return t -} - -const ( - UNKNOWN_DATA_TYPE = 0 - - SERVICE_INFO uint32 = 1 << iota - TRACING_INFO - NETWORK_LAYER - FLOW_INFO - CLIENT_UNIVERSAL_TAG - SERVER_UNIVERSAL_TAG - TUNNEL_INFO - TRANSPORT_LAYER - APPLICATION_LAYER - CAPTURE_INFO - CLIENT_CUSTOM_TAG - SERVER_CUSTOM_TAG - NATIVE_TAG - METRICS - K8S_LABEL -) - -var exportedDataTypeStringMap = map[string]uint32{ - "service_info": SERVICE_INFO, - "tracing_info": TRACING_INFO, - "network_layer": NETWORK_LAYER, - "flow_info": FLOW_INFO, - "client_universal_tag": CLIENT_UNIVERSAL_TAG, - "server_universal_tag": SERVER_UNIVERSAL_TAG, - "tunnel_info": TUNNEL_INFO, - "transport_layer": TRANSPORT_LAYER, - "application_layer": APPLICATION_LAYER, - "capture_info": CAPTURE_INFO, - "client_custom_tag": CLIENT_CUSTOM_TAG, - "server_custom_tag": SERVER_CUSTOM_TAG, - "native_tag": NATIVE_TAG, - "metrics": METRICS, - "k8s_label": K8S_LABEL, -} - -func StringToExportedDataType(str string) uint32 { - t, ok := exportedDataTypeStringMap[str] - if !ok { - log.Warningf("unknown exporter data type: %s", str) - return UNKNOWN_DATA_TYPE - } - return t -} - -func ExportedDataTypeBitsToString(bits uint32) string { - return bitsToString(bits, exportedDataTypeStringMap) -} diff --git a/server/ingester/flow_log/exporters/config/config_test.go b/server/ingester/flow_log/exporters/config/config_test.go deleted file mode 100644 index cbb9ffef65d0..000000000000 --- a/server/ingester/flow_log/exporters/config/config_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package config - -import ( - "io/ioutil" - "reflect" - "testing" - - 
"github.com/gogo/protobuf/proto" - - yaml "gopkg.in/yaml.v2" -) - -type baseConfig struct { - Config ingesterConfig `yaml:"ingester"` -} - -type ingesterConfig struct { - ExportersCfg ExportersCfg `yaml:"exporters"` -} - -func TestConfig(t *testing.T) { - ingesterCfg := baseConfig{} - configBytes, _ := ioutil.ReadFile("./config_test.yaml") - err := yaml.Unmarshal(configBytes, &ingesterCfg) - if err != nil { - t.Fatalf("yaml unmarshal failed: %v", err) - } - expect := baseConfig{ - Config: ingesterConfig{ - ExportersCfg: ExportersCfg{ - OverridableCfg: OverridableCfg{ - ExportDatas: []string{"cbpf-net-span"}, - ExportDataTypes: []string{"service_info"}, - ExportCustomK8sLabelsRegexp: "", - ExportOnlyWithTraceID: nil, - }, - Enabled: false, - OtlpExporterCfgs: []OtlpExporterConfig{ - { - Enabled: true, - Addr: "127.0.0.1:4317", - QueueCount: 4, - QueueSize: 100000, - ExportBatchCount: 32, - GrpcHeaders: map[string]string{ - "key1": "value1", - "key2": "value2", - }, - OverridableCfg: OverridableCfg{ - ExportDatas: []string{"ebpf-sys-span"}, - ExportDataTypes: []string{"tracing_info", "network_layer", "flow_info", "transport_layer", "application_layer", "metrics"}, - ExportCustomK8sLabelsRegexp: "", - ExportOnlyWithTraceID: proto.Bool(true), - }, - }, - }, - }, - }, - } - if !reflect.DeepEqual(expect, ingesterCfg) { - t.Fatalf("yaml unmarshal not equal, expect: %v, got: %v", expect, ingesterCfg) - } -} diff --git a/server/ingester/flow_log/exporters/config/config_test.yaml b/server/ingester/flow_log/exporters/config/config_test.yaml deleted file mode 100644 index bbfd662c0857..000000000000 --- a/server/ingester/flow_log/exporters/config/config_test.yaml +++ /dev/null @@ -1,19 +0,0 @@ -ingester: - exporters: - enabled: false - export-datas: [cbpf-net-span] - export-data-types: [service_info] - export-custom-k8s-labels-regexp: - otlp-exporters: - - enabled: true - addr: 127.0.0.1:4317 - queue-count: 4 - queue-size: 100000 - export-batch-count: 32 - grpc-headers: - key1: value1 - key2: value2 - export-datas: [ebpf-sys-span] - export-data-types: [ tracing_info,network_layer,flow_info,transport_layer,application_layer,metrics ] - export-custom-k8s-labels-regexp: - export-only-with-traceid: true diff --git a/server/ingester/flow_log/exporters/config/otlp_config.go b/server/ingester/flow_log/exporters/config/otlp_config.go deleted file mode 100644 index 252168eac73b..000000000000 --- a/server/ingester/flow_log/exporters/config/otlp_config.go +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright (c) 2024 Yunshan Networks - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package config - -// Preserved for backward compatibility ONLY -type OtlpExporterConfigDeprecated struct { - Enabled bool `yaml:"enabled"` - Addr string `yaml:"addr"` - QueueCount int `yaml:"queue-count"` - QueueSize int `yaml:"queue-size"` - ExportDatas []string `yaml:"export-datas"` - ExportDataTypes []string `yaml:"export-data-types"` - ExportCustomK8sLabelsRegexp string `yaml:"export-custom-k8s-labels-regexp"` - ExportOnlyWithTraceID bool `yaml:"export-only-with-traceid"` - ExportBatchCount int `yaml:"export-batch-count"` - GrpcHeaders map[string]string `yaml:"grpc-headers"` -} - -func NewOtlpDefaultConfigDeprecated() OtlpExporterConfigDeprecated { - return OtlpExporterConfigDeprecated{ - Enabled: false, - Addr: "127.0.0.1:4317", - QueueCount: DefaultOtlpExportQueueCount, - QueueSize: DefaultOtlpExportQueueSize, - ExportDatas: DefaultOtlpExportDatas, - ExportDataTypes: DefaultOtlpExportDataTypes, - ExportBatchCount: DefaultOtlpExportBatchCount, - GrpcHeaders: nil, - } -} - -type OtlpExporterConfig struct { - Enabled bool `yaml:"enabled"` - Addr string `yaml:"addr"` - QueueCount int `yaml:"queue-count"` - QueueSize int `yaml:"queue-size"` - ExportBatchCount int `yaml:"export-batch-count"` - GrpcHeaders map[string]string `yaml:"grpc-headers"` - - OverridableCfg `yaml:",inline"` -} - -const ( - DefaultOtlpExportBatchCount = 32 - DefaultOtlpExportQueueCount = 4 - DefaultOtlpExportQueueSize = 100000 -) - -func (cfg *OtlpExporterConfig) Validate(overridableCfg OverridableCfg) error { - if !cfg.Enabled { - return nil - } - - cfg.calcDataBits() - - if cfg.ExportBatchCount == 0 { - cfg.ExportBatchCount = DefaultOtlpExportBatchCount - } - - if cfg.QueueCount == 0 { - cfg.QueueCount = DefaultOtlpExportQueueCount - } - if cfg.QueueSize == 0 { - cfg.QueueSize = DefaultOtlpExportQueueSize - } - - // overwritten params - if cfg.ExportCustomK8sLabelsRegexp == "" { - cfg.ExportCustomK8sLabelsRegexp = overridableCfg.ExportCustomK8sLabelsRegexp - } - - if len(cfg.ExportDatas) == 0 { - cfg.ExportDatas = overridableCfg.ExportDatas - } - - if len(cfg.ExportDataTypes) == 0 { - cfg.ExportDataTypes = overridableCfg.ExportDataTypes - } - - if cfg.ExportOnlyWithTraceID == nil { - cfg.ExportOnlyWithTraceID = overridableCfg.ExportOnlyWithTraceID - } - return nil -} - -func (cfg *OtlpExporterConfig) calcDataBits() { - for _, v := range cfg.ExportDatas { - cfg.ExportDataBits |= uint32(StringToExportedData(v)) - } - log.Infof("export data bits: %08b, string: %s", cfg.ExportDataBits, ExportedDataBitsToString(cfg.ExportDataBits)) - - for _, v := range cfg.ExportDataTypes { - cfg.ExportDataTypeBits |= uint32(StringToExportedDataType(v)) - } - if cfg.ExportCustomK8sLabelsRegexp != "" { - cfg.ExportDataTypeBits |= K8S_LABEL - } - log.Infof("export data type bits: %08b, string: %s", cfg.ExportDataTypeBits, ExportedDataTypeBitsToString(cfg.ExportDataTypeBits)) -} - -func NewOtlpDefaultConfig() OtlpExporterConfig { - return OtlpExporterConfig{ - Enabled: false, - Addr: "127.0.0.1:4317", - QueueCount: DefaultOtlpExportQueueCount, - QueueSize: DefaultOtlpExportQueueSize, - ExportBatchCount: DefaultOtlpExportBatchCount, - GrpcHeaders: nil, - } -} diff --git a/server/ingester/flow_log/exporters/exporters.go b/server/ingester/flow_log/exporters/exporters.go deleted file mode 100644 index dbace008e948..000000000000 --- a/server/ingester/flow_log/exporters/exporters.go +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright (c) 2024 Yunshan Networks - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you 
may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package exporters - -import ( - logging "github.com/op/go-logging" - - "github.com/deepflowio/deepflow/server/ingester/flow_log/config" - exporters_cfg "github.com/deepflowio/deepflow/server/ingester/flow_log/exporters/config" - "github.com/deepflowio/deepflow/server/ingester/flow_log/exporters/otlp_exporter" - "github.com/deepflowio/deepflow/server/ingester/flow_log/exporters/universal_tag" - "github.com/deepflowio/deepflow/server/ingester/flow_log/log_data" -) - -var log = logging.MustGetLogger("exporters") - -const ( - PUT_BATCH_SIZE = 1024 -) - -type Exporter interface { - // Starts an exporter worker - Start() - // Close an exporter worker - Close() - - // Put sends data to the exporter worker. Worker could decide what to do next. e.g.: - // - send it out synchronously. - // - store it in a queue and handle it later. - Put(items ...interface{}) - - // IsExportData tell the decoder if data need to be sended to specific exporter. - IsExportData(l *log_data.L7FlowLog) bool -} - -type ExportersCache [][]interface{} - -type Exporters struct { - config *exporters_cfg.ExportersCfg - universalTagsManager *universal_tag.UniversalTagsManager - exporters []Exporter - putCaches []ExportersCache // cache for batch put to exporter, multi flowlog decoders call Put(), and put to multi exporters -} - -func NewExporters(flowlogCfg *config.Config) *Exporters { - exportersCfg := &flowlogCfg.ExportersCfg - if !exportersCfg.Enabled { - log.Infof("exporters disabled") - return nil - } - log.Infof("init exporters: %v", flowlogCfg.ExportersCfg) - exporters := make([]Exporter, 0) - putCaches := make([]ExportersCache, flowlogCfg.DecoderQueueCount) - - universalTagManager := universal_tag.NewUniversalTagsManager(exportersCfg.ExportCustomK8sLabelsRegexp, flowlogCfg.Base) - - for i := range exportersCfg.OtlpExporterCfgs { - if exportersCfg.OtlpExporterCfgs[i].Enabled { - otlpExporter := otlp_exporter.NewOtlpExporter(i, exportersCfg, universalTagManager) - exporters = append(exporters, otlpExporter) - } - } - - // todo add other exporters.... - - // init caches - for i := range putCaches { - putCaches[i] = make(ExportersCache, len(exporters)) - for j := range exporters { - putCaches[i][j] = make([]interface{}, 0, PUT_BATCH_SIZE) - } - } - - return &Exporters{ - config: exportersCfg, - universalTagsManager: universalTagManager, - exporters: exporters, - putCaches: putCaches, - } -} - -func (es *Exporters) Start() { - es.universalTagsManager.Start() - for _, e := range es.exporters { - e.Start() - } -} - -func (es *Exporters) Close() { - es.universalTagsManager.Close() - for _, e := range es.exporters { - e.Close() - } -} - -// parallel put -func (es *Exporters) Put(l *log_data.L7FlowLog, decoderIndex int) { - if l == nil { - es.Flush(decoderIndex) - return - } - - exportersCache := es.putCaches[decoderIndex] - for i, e := range es.exporters { - if e.IsExportData(l) { - l.AddReferenceCount() - exportersCache[i] = append(exportersCache[i], l) - if len(exportersCache[i]) >= PUT_BATCH_SIZE { - e.Put(exportersCache[i]...) 
- exportersCache[i] = exportersCache[i][:0] - } - } - } -} - -func (es *Exporters) Flush(decoderIndex int) { - exportersCache := es.putCaches[decoderIndex] - for i := range exportersCache { - if len(exportersCache[i]) > 0 { - es.exporters[i].Put(exportersCache[i]...) - exportersCache[i] = exportersCache[i][:0] - } - } -} diff --git a/server/ingester/flow_log/exporters/otlp_exporter/exporter.go b/server/ingester/flow_log/exporters/otlp_exporter/exporter.go deleted file mode 100644 index deb14bd46e93..000000000000 --- a/server/ingester/flow_log/exporters/otlp_exporter/exporter.go +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright (c) 2024 Yunshan Networks - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package otlp_exporter - -import ( - "fmt" - "strconv" - "time" - - logging "github.com/op/go-logging" - "go.opentelemetry.io/collector/pdata/ptrace" - "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" - - "github.com/deepflowio/deepflow/server/ingester/common" - exporters_cfg "github.com/deepflowio/deepflow/server/ingester/flow_log/exporters/config" - utag "github.com/deepflowio/deepflow/server/ingester/flow_log/exporters/universal_tag" - "github.com/deepflowio/deepflow/server/ingester/flow_log/log_data" - "github.com/deepflowio/deepflow/server/ingester/ingesterctl" - "github.com/deepflowio/deepflow/server/libs/datatype" - "github.com/deepflowio/deepflow/server/libs/debug" - "github.com/deepflowio/deepflow/server/libs/queue" - "github.com/deepflowio/deepflow/server/libs/stats" - "github.com/deepflowio/deepflow/server/libs/utils" -) - -var log = logging.MustGetLogger("otlp_exporter") - -const ( - QUEUE_BATCH_COUNT = 1024 -) - -type OtlpExporter struct { - index int - Addr string - dataQueues queue.FixedMultiQueue - queueCount int - grpcExporters []ptraceotlp.GRPCClient - grpcConns []*grpc.ClientConn - universalTagsManager *utag.UniversalTagsManager - config *exporters_cfg.OtlpExporterConfig - counter *Counter - lastCounter Counter - running bool - - utils.Closable -} - -type Counter struct { - RecvCounter int64 `statsd:"recv-count"` - SendCounter int64 `statsd:"send-count"` - SendBatchCounter int64 `statsd:"send-batch-count"` - ExportUsedTimeNs int64 `statsd:"export-used-time-ns"` - DropCounter int64 `statsd:"drop-count"` - DropBatchCounter int64 `statsd:"drop-batch-count"` - DropNoTraceIDCounter int64 `statsd:"drop-no-traceid-count"` -} - -func (e *OtlpExporter) GetCounter() interface{} { - var counter Counter - counter, *e.counter = *e.counter, Counter{} - e.lastCounter = counter - return &counter -} - -type ExportItem interface { - Release() -} - -func NewOtlpExporter(index int, config *exporters_cfg.ExportersCfg, universalTagsManager *utag.UniversalTagsManager) *OtlpExporter { - otlpConfig := config.OtlpExporterCfgs[index] - - dataQueues := queue.NewOverwriteQueues( - fmt.Sprintf("otlp_exporter_%d", index), queue.HashKey(otlpConfig.QueueCount), otlpConfig.QueueSize, - 
queue.OptionFlushIndicator(time.Second), - queue.OptionRelease(func(p interface{}) { p.(ExportItem).Release() }), - common.QUEUE_STATS_MODULE_INGESTER) - - exporter := &OtlpExporter{ - index: index, - dataQueues: dataQueues, - queueCount: otlpConfig.QueueCount, - universalTagsManager: universalTagsManager, - grpcConns: make([]*grpc.ClientConn, otlpConfig.QueueCount), - grpcExporters: make([]ptraceotlp.GRPCClient, otlpConfig.QueueCount), - config: &otlpConfig, - counter: &Counter{}, - } - debug.ServerRegisterSimple(ingesterctl.CMD_OTLP_EXPORTER, exporter) - common.RegisterCountableForIngester("exporter", exporter, stats.OptionStatTags{ - "type": "otlp", "index": strconv.Itoa(index)}) - log.Infof("otlp exporter %d created", index) - return exporter -} - -func (e *OtlpExporter) IsExportData(l *log_data.L7FlowLog) bool { - if e.config.ExportOnlyWithTraceID != nil && *e.config.ExportOnlyWithTraceID && l.TraceId == "" { - return false - } - - if (1< 0 { - ctx = metadata.NewOutgoingContext(ctx, metadata.New(e.config.GrpcHeaders)) - } - for e.running { - n := e.dataQueues.Gets(queue.HashKey(queueID), flows) - for _, flow := range flows[:n] { - if flow == nil { - if batchCount > 0 { - if err := e.grpcExport(ctx, queueID, ptraceotlp.NewExportRequestFromTraces(traces)); err == nil { - e.counter.SendCounter += int64(batchCount) - } - batchCount = 0 - traces = ptrace.NewTraces() - } - continue - } - switch t := flow.(type) { - case (*log_data.L7FlowLog): - f := flow.(*log_data.L7FlowLog) - L7FlowLogToExportResourceSpans(f, e.universalTagsManager, e.config.ExportDataTypeBits, traces.ResourceSpans().AppendEmpty()) - batchCount++ - if batchCount >= e.config.ExportBatchCount { - if err := e.grpcExport(ctx, queueID, ptraceotlp.NewExportRequestFromTraces(traces)); err == nil { - e.counter.SendCounter += int64(batchCount) - } - batchCount = 0 - traces = ptrace.NewTraces() - } - - f.Release() - default: - log.Warningf("flow type(%T) unsupport", t) - continue - } - } - } -} - -func (e *OtlpExporter) grpcExport(ctx context.Context, i int, req ptraceotlp.ExportRequest) error { - defer func() { - if r := recover(); r != nil { - log.Warningf("otlp grpc export error: %s", r) - if j, err := req.MarshalJSON(); err == nil { - log.Infof("otlp request: %s", string(j)) - } - } - }() - - now := time.Now() - - if e.grpcExporters[i] == nil { - if err := e.newGrpcExporter(i); err != nil { - if e.counter.DropCounter == 0 { - log.Warningf("new grpc exporter failed. err: %s", err) - } - e.counter.DropCounter++ - return err - } - } - _, err := e.grpcExporters[i].Export(ctx, req) - if err != nil { - if e.counter.DropCounter == 0 { - log.Warningf("exporter %d send grpc traces failed. err: %s", e.index, err) - } - e.counter.DropCounter++ - e.grpcExporters[i] = nil - return err - } else { - e.counter.SendBatchCounter++ - } - e.counter.ExportUsedTimeNs += int64(time.Since(now)) - return nil -} - -func (e *OtlpExporter) newGrpcExporter(i int) error { - if e.grpcConns[i] != nil { - e.grpcConns[i].Close() - e.grpcConns[i] = nil - } - var options = []grpc.DialOption{grpc.WithInsecure(), grpc.WithTimeout(time.Minute)} - conn, err := grpc.Dial(e.config.Addr, options...) 
- if err != nil { - return fmt.Errorf("grpc dial %s failed, err: %s", e.config.Addr, err) - } - log.Debugf("new grpc otlp exporter: %s", e.config.Addr) - e.grpcConns[i] = conn - e.grpcExporters[i] = ptraceotlp.NewGRPCClient(conn) - return nil -} - -func (e *OtlpExporter) HandleSimpleCommand(op uint16, arg string) string { - return fmt.Sprintf("otlp exporter %d last 10s counter: %+v", e.index, e.lastCounter) -} diff --git a/server/ingester/flow_log/flow_log/flow_log.go b/server/ingester/flow_log/flow_log/flow_log.go index d2fc8fc797e3..612519f46114 100644 --- a/server/ingester/flow_log/flow_log/flow_log.go +++ b/server/ingester/flow_log/flow_log/flow_log.go @@ -22,12 +22,12 @@ import ( "strings" "time" - "github.com/deepflowio/deepflow/server/ingester/flow_log/exporters" - + logging "github.com/op/go-logging" _ "golang.org/x/net/context" _ "google.golang.org/grpc" dropletqueue "github.com/deepflowio/deepflow/server/ingester/droplet/queue" + "github.com/deepflowio/deepflow/server/ingester/exporters" "github.com/deepflowio/deepflow/server/ingester/flow_log/common" "github.com/deepflowio/deepflow/server/ingester/flow_log/config" "github.com/deepflowio/deepflow/server/ingester/flow_log/dbwriter" @@ -43,7 +43,6 @@ import ( "github.com/deepflowio/deepflow/server/libs/queue" libqueue "github.com/deepflowio/deepflow/server/libs/queue" "github.com/deepflowio/deepflow/server/libs/receiver" - logging "github.com/op/go-logging" ) var log = logging.MustGetLogger("flow_log") @@ -65,12 +64,10 @@ type Logger struct { FlowLogWriter *dbwriter.FlowLogWriter } -func NewFlowLog(config *config.Config, recv *receiver.Receiver, platformDataManager *grpc.PlatformDataManager) (*FlowLog, error) { +func NewFlowLog(config *config.Config, recv *receiver.Receiver, platformDataManager *grpc.PlatformDataManager, exporters *exporters.Exporters) (*FlowLog, error) { manager := dropletqueue.NewManager(ingesterctl.INGESTERCTL_FLOW_LOG_QUEUE) if config.Base.StorageDisabled { - exporters := exporters.NewExporters(config) - l7FlowLogger, err := NewL7FlowLogger(config, platformDataManager, manager, recv, nil, exporters) if err != nil { return nil, err @@ -90,10 +87,8 @@ func NewFlowLog(config *config.Config, recv *receiver.Receiver, platformDataMana if err != nil { return nil, err } - l4FlowLogger := NewL4FlowLogger(config, platformDataManager, manager, recv, flowLogWriter) + l4FlowLogger := NewL4FlowLogger(config, platformDataManager, manager, recv, flowLogWriter, exporters) - // the exporters cannot be shared by multiple logger decoder. 
-	exporters := exporters.NewExporters(config)
 	l7FlowLogger, err := NewL7FlowLogger(config, platformDataManager, manager, recv, flowLogWriter, exporters)
 	if err != nil {
 		return nil, err
@@ -172,7 +167,7 @@ func NewLogger(msgType datatype.MessageType, config *config.Config, platformData
 	}, nil
 }

-func NewL4FlowLogger(config *config.Config, platformDataManager *grpc.PlatformDataManager, manager *dropletqueue.Manager, recv *receiver.Receiver, flowLogWriter *dbwriter.FlowLogWriter) *Logger {
+func NewL4FlowLogger(config *config.Config, platformDataManager *grpc.PlatformDataManager, manager *dropletqueue.Manager, recv *receiver.Receiver, flowLogWriter *dbwriter.FlowLogWriter, exporters *exporters.Exporters) *Logger {
 	msgType := datatype.MESSAGE_TYPE_TAGGEDFLOW
 	queueCount := config.DecoderQueueCount
 	queueSuffix := "-l4"
@@ -213,7 +208,7 @@ func NewL4FlowLogger(config *config.Config, platformDataManager *grpc.PlatformDa
 			queue.QueueReader(decodeQueues.FixedMultiQueue[i]),
 			throttlers[i],
 			nil,
-			nil,
+			exporters,
 			config,
 		)
 	}
diff --git a/server/ingester/flow_log/log_data/export.go b/server/ingester/flow_log/log_data/export.go
new file mode 100644
index 000000000000..e36852dc18c2
--- /dev/null
+++ b/server/ingester/flow_log/log_data/export.go
@@ -0,0 +1,88 @@
+package log_data
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+
+	"github.com/deepflowio/deepflow/server/ingester/exporters/common"
+	config "github.com/deepflowio/deepflow/server/ingester/exporters/config"
+	utag "github.com/deepflowio/deepflow/server/ingester/exporters/universal_tag"
+	"github.com/deepflowio/deepflow/server/libs/utils"
+)
+
+func (l4 *L4FlowLog) QueryUniversalTags(utags *utag.UniversalTagsManager) (*utag.UniversalTags, *utag.UniversalTags) {
+	return utags.QueryUniversalTags(
+		l4.RegionID0, l4.AZID0, l4.HostID0, l4.PodNSID0, l4.PodClusterID0, l4.SubnetID0, l4.VtapID,
+		l4.L3DeviceType0, l4.AutoServiceType0, l4.AutoInstanceType0,
+		l4.L3DeviceID0, l4.AutoServiceID0, l4.AutoInstanceID0, l4.PodNodeID0, l4.PodGroupID0, l4.PodID0, uint32(l4.L3EpcID0), l4.GPID0, l4.ServiceID0,
+		l4.IsIPv4, l4.IP40, l4.IP60,
+	), utags.QueryUniversalTags(
+		l4.RegionID1, l4.AZID1, l4.HostID1, l4.PodNSID1, l4.PodClusterID1, l4.SubnetID1, l4.VtapID,
+		l4.L3DeviceType1, l4.AutoServiceType1, l4.AutoInstanceType1,
+		l4.L3DeviceID1, l4.AutoServiceID1, l4.AutoInstanceID1, l4.PodNodeID1, l4.PodGroupID1, l4.PodID1, uint32(l4.L3EpcID1), l4.GPID1, l4.ServiceID1,
+		l4.IsIPv4, l4.IP41, l4.IP61,
+	)
+}
+
+func (l4 *L4FlowLog) EncodeTo(protocol config.ExportProtocol, utags *utag.UniversalTagsManager, cfg *config.ExporterCfg) (interface{}, error) {
+	switch protocol {
+	case config.PROTOCOL_KAFKA:
+		tags0, tags1 := l4.QueryUniversalTags(utags)
+		k8sLabels0, k8sLabels1 := utags.QueryCustomK8sLabels(l4.PodID0), utags.QueryCustomK8sLabels(l4.PodID1)
+		return common.EncodeToJson(l4, int(l4.DataSource()), cfg, tags0, tags1, k8sLabels0, k8sLabels1), nil
+	default:
+		return nil, fmt.Errorf("l4_flow_log does not support export to %s", protocol)
+	}
+}
+
+func (l4 *L4FlowLog) DataSource() uint32 {
+	return uint32(config.L4_FLOW_LOG)
+}
+
+func (l4 *L4FlowLog) TimestampUs() int64 {
+	return int64(l4.FlowInfo.EndTime)
+}
+
+func (l4 *L4FlowLog) GetFieldValueByOffsetAndKind(offset uintptr, kind reflect.Kind, fieldName string) interface{} {
+	return utils.GetValueByOffsetAndKind(uintptr(unsafe.Pointer(l4)), offset, kind, fieldName)
+}
+
+func (l7 *L7FlowLog) EncodeTo(protocol config.ExportProtocol, utags *utag.UniversalTagsManager, cfg *config.ExporterCfg) (interface{}, error) {
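+	// Dispatch on the requested export protocol: OTLP builds trace
+	// ResourceSpans via EncodeToOtlp, while Kafka serializes the flow log
+	// to JSON together with universal tags and custom K8s labels.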
+	switch protocol {
+	case config.PROTOCOL_OTLP:
+		return l7.EncodeToOtlp(utags, cfg.ExportFieldCategoryBits), nil
+	case config.PROTOCOL_KAFKA:
+		tags0, tags1 := l7.QueryUniversalTags(utags)
+		k8sLabels0, k8sLabels1 := utags.QueryCustomK8sLabels(l7.PodID0), utags.QueryCustomK8sLabels(l7.PodID1)
+		return common.EncodeToJson(l7, int(l7.DataSource()), cfg, tags0, tags1, k8sLabels0, k8sLabels1), nil
+	default:
+		return nil, fmt.Errorf("l7_flow_log does not support export to %s", protocol)
+	}
+}
+
+func (l7 *L7FlowLog) DataSource() uint32 {
+	return uint32(config.L7_FLOW_LOG)
+}
+
+func (l7 *L7FlowLog) TimestampUs() int64 {
+	return int64(l7.L7Base.EndTime)
+}
+
+func (l7 *L7FlowLog) GetFieldValueByOffsetAndKind(offset uintptr, kind reflect.Kind, fieldName string) interface{} {
+	return utils.GetValueByOffsetAndKind(uintptr(unsafe.Pointer(l7)), offset, kind, fieldName)
+}
+
+func (l7 *L7FlowLog) QueryUniversalTags(utags *utag.UniversalTagsManager) (*utag.UniversalTags, *utag.UniversalTags) {
+	return utags.QueryUniversalTags(
+		l7.RegionID0, l7.AZID0, l7.HostID0, l7.PodNSID0, l7.PodClusterID0, l7.SubnetID0, l7.VtapID,
+		l7.L3DeviceType0, l7.AutoServiceType0, l7.AutoInstanceType0,
+		l7.L3DeviceID0, l7.AutoServiceID0, l7.AutoInstanceID0, l7.PodNodeID0, l7.PodGroupID0, l7.PodID0, uint32(l7.L3EpcID0), l7.GPID0, l7.ServiceID0,
+		l7.IsIPv4, uint32(l7.IP40), l7.IP60,
+	), utags.QueryUniversalTags(
+		l7.RegionID1, l7.AZID1, l7.HostID1, l7.PodNSID1, l7.PodClusterID1, l7.SubnetID1, l7.VtapID,
+		l7.L3DeviceType1, l7.AutoServiceType1, l7.AutoInstanceType1,
+		l7.L3DeviceID1, l7.AutoServiceID1, l7.AutoInstanceID1, l7.PodNodeID1, l7.PodGroupID1, l7.PodID1, uint32(l7.L3EpcID1), l7.GPID1, l7.ServiceID1,
+		l7.IsIPv4, uint32(l7.IP41), l7.IP61,
+	)
+}
diff --git a/server/ingester/flow_log/log_data/l4_flow_log.go b/server/ingester/flow_log/log_data/l4_flow_log.go
index 595ea717cdb4..c3940fc48252 100644
--- a/server/ingester/flow_log/log_data/l4_flow_log.go
+++ b/server/ingester/flow_log/log_data/l4_flow_log.go
@@ -42,7 +42,7 @@ const (
 type L4FlowLog struct {
 	pool.ReferenceCount
-	_id uint64 // 用来标记全局(多节点)唯一的记录
+	_id uint64 `json:"_id" category:"tag" sub:"flow_info"` // 用来标记全局(多节点)唯一的记录
 	DataLinkLayer
 	NetworkLayer
@@ -55,10 +55,10 @@ type L4FlowLog struct {
 }
 type DataLinkLayer struct {
-	MAC0    uint64 `json:"mac_0"`
-	MAC1    uint64 `json:"mac_1"`
-	EthType uint16 `json:"eth_type"`
-	VLAN    uint16 `json:"vlan,omitempty"`
+	MAC0    uint64 `json:"mac_0" category:"tag" sub:"data_link_layer" to_string:"MacString"`
+	MAC1    uint64 `json:"mac_1" category:"tag" sub:"data_link_layer" to_string:"MacString"`
+	EthType uint16 `json:"eth_type" category:"tag" sub:"data_link_layer"`
+	VLAN    uint16 `json:"vlan" category:"tag" sub:"data_link_layer"`
 }
 var DataLinkLayerColumns = []*ckdb.Column{
@@ -76,30 +76,39 @@ func (f *DataLinkLayer) WriteBlock(block *ckdb.Block) {
 		f.VLAN)
 }
+func DF_IPv4String(ip4 uint32) string {
+	ip := make(net.IP, 4)
+	ip[0] = byte(ip4 >> 24)
+	ip[1] = byte(ip4 >> 16)
+	ip[2] = byte(ip4 >> 8)
+	ip[3] = byte(ip4)
+	return ip.String()
+}
+
 type NetworkLayer struct {
-	IP40         uint32 `json:"ip4_0"`
-	IP41         uint32 `json:"ip4_1"`
-	IP60         net.IP `json:"ip6_0"`
-	IP61         net.IP `json:"ip6_1"`
-	IsIPv4       bool   `json:"is_ipv4"`
-	Protocol     uint8  `json:"protocol"`
-	TunnelTier   uint8  `json:"tunnel_tier,omitempty"`
-	TunnelType   uint16 `json:"tunnel_type,omitempty"`
-	TunnelTxID   uint32 `json:"tunnel_tx_id,omitempty"`
-	TunnelRxID   uint32 `json:"tunnel_rx_id,omitempty"`
-	TunnelTxIP40 uint32 `json:"tunnel_tx_ip4_0,omitempty"`
-	TunnelTxIP41 uint32 `json:"tunnel_tx_ip4_1,omitempty"`
-	TunnelRxIP40 uint32 `json:"tunnel_rx_ip4_0,omitempty"`
-	TunnelRxIP41 uint32 `json:"tunnel_rx_ip4_1,omitempty"`
-	TunnelTxIP60 net.IP `json:"tunnel_tx_ip6_0,omitempty"`
-	TunnelTxIP61 net.IP `json:"tunnel_tx_ip6_1,omitempty"`
-	TunnelRxIP60 net.IP `json:"tunnel_rx_ip6_0,omitempty"`
-	TunnelRxIP61 net.IP `json:"tunnel_rx_ip6_1,omitempty"`
-	TunnelIsIPv4 bool   `json:"tunnel_is_ipv4"`
-	TunnelTxMac0 uint32 `json:"tunnel_tx_mac_0,omitempty"`
-	TunnelTxMac1 uint32 `json:"tunnel_tx_mac_1,omitempty"`
-	TunnelRxMac0 uint32 `json:"tunnel_rx_mac_0,omitempty"`
-	TunnelRxMac1 uint32 `json:"tunnel_rx_mac_1,omitempty"`
+	IP40         uint32 `json:"ip4_0" category:"tag" sub:"network_layer" to_string:"IPv4String"`
+	IP41         uint32 `json:"ip4_1" category:"tag" sub:"network_layer" to_string:"IPv4String"`
+	IP60         net.IP `json:"ip6_0" category:"tag" sub:"network_layer"`
+	IP61         net.IP `json:"ip6_1" category:"tag" sub:"network_layer"`
+	IsIPv4       bool   `json:"is_ipv4" category:"tag" sub:"network_layer"`
+	Protocol     uint8  `json:"protocol" category:"tag" sub:"network_layer" enumfile:"protocol" translate:"tunnel_tier"`
+	TunnelTier   uint8  `json:"tunnel_tier" category:"tag" sub:"tunnel_info" translate:"tunnel_type"`
+	TunnelType   uint16 `json:"tunnel_type" category:"tag" sub:"tunnel_info"`
+	TunnelTxID   uint32 `json:"tunnel_tx_id" category:"tag" sub:"tunnel_info"`
+	TunnelRxID   uint32 `json:"tunnel_rx_id" category:"tag" sub:"tunnel_info"`
+	TunnelTxIP40 uint32 `json:"tunnel_tx_ip4_0" category:"tag" sub:"tunnel_info" to_string:"IPv4String"`
+	TunnelTxIP41 uint32 `json:"tunnel_tx_ip4_1" category:"tag" sub:"tunnel_info" to_string:"IPv4String"`
+	TunnelRxIP40 uint32 `json:"tunnel_rx_ip4_0" category:"tag" sub:"tunnel_info" to_string:"IPv4String"`
+	TunnelRxIP41 uint32 `json:"tunnel_rx_ip4_1" category:"tag" sub:"tunnel_info" to_string:"IPv4String"`
+	TunnelTxIP60 net.IP `json:"tunnel_tx_ip6_0" category:"tag" sub:"tunnel_info" to_string:"IPv6String"`
+	TunnelTxIP61 net.IP `json:"tunnel_tx_ip6_1" category:"tag" sub:"tunnel_info" to_string:"IPv6String"`
+	TunnelRxIP60 net.IP `json:"tunnel_rx_ip6_0" category:"tag" sub:"tunnel_info" to_string:"IPv6String"`
+	TunnelRxIP61 net.IP `json:"tunnel_rx_ip6_1" category:"tag" sub:"tunnel_info" to_string:"IPv6String"`
+	TunnelIsIPv4 bool   `json:"tunnel_is_ipv4" category:"tag" sub:"tunnel_info"`
+	TunnelTxMac0 uint32 `json:"tunnel_tx_mac_0" category:"tag" sub:"tunnel_info"`
+	TunnelTxMac1 uint32 `json:"tunnel_tx_mac_1" category:"tag" sub:"tunnel_info"`
+	TunnelRxMac0 uint32 `json:"tunnel_rx_mac_0" category:"tag" sub:"tunnel_info"`
+	TunnelRxMac1 uint32 `json:"tunnel_rx_mac_1" category:"tag" sub:"tunnel_info"`
 }
 var NetworkLayerColumns = []*ckdb.Column{
@@ -161,14 +170,14 @@ func (n *NetworkLayer) WriteBlock(block *ckdb.Block) {
 }
 type TransportLayer struct {
-	ClientPort       uint16 `json:"client_port"`
-	ServerPort       uint16 `json:"server_port"`
-	TCPFlagsBit0     uint16 `json:"tcp_flags_bit_0,omitempty"`
-	TCPFlagsBit1     uint16 `json:"tcp_flags_bit_1,omitempty"`
-	SynSeq           uint32 `json:"syn_seq"`
-	SynAckSeq        uint32 `json:"syn_ack_seq"`
-	LastKeepaliveSeq uint32 `json:"last_keepalive_seq"`
-	LastKeepaliveAck uint32 `json:"last_keepalive_ack"`
+	ClientPort       uint16 `json:"client_port" category:"tag" sub:"transport_layer"`
+	ServerPort       uint16 `json:"server_port" category:"tag" sub:"transport_layer"`
+	TCPFlagsBit0     uint16 `json:"tcp_flags_bit_0" category:"tag" sub:"transport_layer"`
+	TCPFlagsBit1     uint16 `json:"tcp_flags_bit_1" category:"tag" sub:"transport_layer"`
+	SynSeq           uint32
`json:"syn_seq" category:"tag" sub:"transport_layer"` + SynAckSeq uint32 `json:"syn_ack_seq" category:"tag" sub:"transport_layer"` + LastKeepaliveSeq uint32 `json:"last_keepalive_seq" category:"tag" sub:"transport_layer"` + LastKeepaliveAck uint32 `json:"last_keepalive_ack" category:"tag" sub:"transport_layer"` } var TransportLayerColumns = []*ckdb.Column{ @@ -196,7 +205,7 @@ func (t *TransportLayer) WriteBlock(block *ckdb.Block) { } type ApplicationLayer struct { - L7Protocol uint8 `json:"l7_protocol,omitempty"` // HTTP, DNS, others + L7Protocol uint8 `json:"l7_protocol"` // HTTP, DNS, others } var ApplicationLayerColumns = []*ckdb.Column{ @@ -209,8 +218,8 @@ func (a *ApplicationLayer) WriteBlock(block *ckdb.Block) { } type Internet struct { - Province0 string `json:"province_0"` - Province1 string `json:"province_1"` + Province0 string `json:"province_0" category:"tag" sub:"network_layer"` + Province1 string `json:"province_1" category:"tag" sub:"network_layer"` } var InternetColumns = []*ckdb.Column{ @@ -224,46 +233,46 @@ func (i *Internet) WriteBlock(block *ckdb.Block) { } type KnowledgeGraph struct { - RegionID0 uint16 `json:"region_id_0"` - RegionID1 uint16 `json:"region_id_1"` - AZID0 uint16 `json:"az_id_0"` - AZID1 uint16 `json:"az_id_1"` - HostID0 uint16 `json:"host_id_0"` - HostID1 uint16 `json:"host_id_1"` - L3DeviceType0 uint8 `json:"l3_device_type_0"` - L3DeviceType1 uint8 `json:"l3_device_type_1"` - L3DeviceID0 uint32 `json:"l3_device_id_0"` - L3DeviceID1 uint32 `json:"l3_device_id_1"` - PodNodeID0 uint32 `json:"pod_node_id_0"` - PodNodeID1 uint32 `json:"pod_node_id_1"` - PodNSID0 uint16 `json:"pod_ns_id_0"` - PodNSID1 uint16 `json:"pod_ns_id_1"` - PodGroupID0 uint32 `json:"pod_group_id_0"` - PodGroupID1 uint32 `json:"pod_group_id_1"` - PodGroupType0 uint8 `json:"pod_group_type_0"` // no need to store - PodGroupType1 uint8 `json:"pod_group_type_1"` // no need to store - PodID0 uint32 `json:"pod_id_0"` - PodID1 uint32 `json:"pod_id_1"` - PodClusterID0 uint16 `json:"pod_cluster_id_0"` - PodClusterID1 uint16 `json:"pod_cluster_id_1"` - L3EpcID0 int32 `json:"l3_epc_id_0"` - L3EpcID1 int32 `json:"l3_epc_id_1"` - EpcID0 int32 `json:"epc_id_0"` - EpcID1 int32 `json:"epc_id_1"` - SubnetID0 uint16 `json:"subnet_id_0"` - SubnetID1 uint16 `json:"subnet_id_1"` - ServiceID0 uint32 `json:"service_id_0"` - ServiceID1 uint32 `json:"service_id_1"` - - AutoInstanceID0 uint32 - AutoInstanceType0 uint8 - AutoServiceID0 uint32 - AutoServiceType0 uint8 - - AutoInstanceID1 uint32 - AutoInstanceType1 uint8 - AutoServiceID1 uint32 - AutoServiceType1 uint8 + RegionID0 uint16 `json:"region_id_0" category:"tag" sub:"universal_tag"` + RegionID1 uint16 `json:"region_id_1" category:"tag" sub:"universal_tag"` + AZID0 uint16 `json:"az_id_0" category:"tag" sub:"universal_tag"` + AZID1 uint16 `json:"az_id_1" category:"tag" sub:"universal_tag"` + HostID0 uint16 `json:"host_id_0" category:"tag" sub:"universal_tag"` + HostID1 uint16 `json:"host_id_1" category:"tag" sub:"universal_tag"` + L3DeviceType0 uint8 `json:"l3_device_type_0" category:"tag" sub:"universal_tag"` + L3DeviceType1 uint8 `json:"l3_device_type_1" category:"tag" sub:"universal_tag"` + L3DeviceID0 uint32 `json:"l3_device_id_0" category:"tag" sub:"universal_tag"` + L3DeviceID1 uint32 `json:"l3_device_id_1" category:"tag" sub:"universal_tag"` + PodNodeID0 uint32 `json:"pod_node_id_0" category:"tag" sub:"universal_tag"` + PodNodeID1 uint32 `json:"pod_node_id_1" category:"tag" sub:"universal_tag"` + PodNSID0 uint16 `json:"pod_ns_id_0" category:"tag" 
sub:"universal_tag"` + PodNSID1 uint16 `json:"pod_ns_id_1" category:"tag" sub:"universal_tag"` + PodGroupID0 uint32 `json:"pod_group_id_0" category:"tag" sub:"universal_tag"` + PodGroupID1 uint32 `json:"pod_group_id_1" category:"tag" sub:"universal_tag"` + PodGroupType0 uint8 `json:"pod_group_type_0" category:"tag" sub:"universal_tag" enumfile:"pod_group_type"` // no need to store + PodGroupType1 uint8 `json:"pod_group_type_1" category:"tag" sub:"universal_tag" enumfile:"pod_group_type"` // no need to store + PodID0 uint32 `json:"pod_id_0" category:"tag" sub:"universal_tag"` + PodID1 uint32 `json:"pod_id_1" category:"tag" sub:"universal_tag"` + PodClusterID0 uint16 `json:"pod_cluster_id_0" category:"tag" sub:"universal_tag"` + PodClusterID1 uint16 `json:"pod_cluster_id_1" category:"tag" sub:"universal_tag"` + L3EpcID0 int32 `json:"l3_epc_id_0" category:"tag" sub:"universal_tag"` + L3EpcID1 int32 `json:"l3_epc_id_1" category:"tag" sub:"universal_tag"` + EpcID0 int32 `json:"epc_id_0" category:"tag" sub:"universal_tag"` + EpcID1 int32 `json:"epc_id_1" category:"tag" sub:"universal_tag"` + SubnetID0 uint16 `json:"subnet_id_0" category:"tag" sub:"universal_tag"` + SubnetID1 uint16 `json:"subnet_id_1" category:"tag" sub:"universal_tag"` + ServiceID0 uint32 `json:"service_id_0" category:"tag" sub:"universal_tag"` + ServiceID1 uint32 `json:"service_id_1" category:"tag" sub:"universal_tag"` + + AutoInstanceID0 uint32 `json:"auto_instance_id_0" category:"tag" sub:"universal_tag"` + AutoInstanceType0 uint8 `json:"auto_instance_type_0" category:"tag" sub:"universal_tag" enumfile:"auto_instance_type"` + AutoServiceID0 uint32 `json:"auto_service_id_0" category:"tag" sub:"universal_tag"` + AutoServiceType0 uint8 `json:"auto_service_type_0" category:"tag" sub:"universal_tag" enumfile:"auto_service_type"` + + AutoInstanceID1 uint32 `json:"auto_instance_id_1" category:"tag" sub:"universal_tag"` + AutoInstanceType1 uint8 `json:"auto_instance_type_1" category:"tag" sub:"universal_tag" enumfile:"auto_instance_type"` + AutoServiceID1 uint32 `json:"auto_service_id_1" category:"tag" sub:"universal_tag"` + AutoServiceType1 uint8 `json:"auto_service_type_1" category:"tag" sub:"universal_tag" enumfile:"auto_service_type"` TagSource0 uint8 TagSource1 uint8 @@ -367,35 +376,36 @@ func (k *KnowledgeGraph) WriteBlock(block *ckdb.Block) { } type FlowInfo struct { - CloseType uint16 `json:"close_type"` - SignalSource uint16 `json:"signal_source"` - FlowID uint64 `json:"flow_id"` - TapType uint8 `json:"capture_network_type_id"` - NatSource uint8 `json:"nat_source"` - TapPortType uint8 `json:"capture_nic_type"` // 0: MAC, 1: IPv4, 2:IPv6, 3: ID - TapPort uint32 `json:"capture_nic"` - TapSide string `json:"observation_point"` - VtapID uint16 `json:"agent_id"` - L2End0 bool `json:"l2_end_0"` - L2End1 bool `json:"l2_end_1"` - L3End0 bool `json:"l3_end_0"` - L3End1 bool `json:"l3_end_1"` - StartTime int64 `json:"start_time"` // us - EndTime int64 `json:"end_time"` // us - Duration uint64 `json:"duration"` // us - IsNewFlow uint8 `json:"is_new_flow"` - Status uint8 `json:"status"` - AclGids []uint16 - GPID0 uint32 - GPID1 uint32 - - NatRealIP0 uint32 - NatRealIP1 uint32 - NatRealPort0 uint16 - NatRealPort1 uint16 - - DirectionScore uint8 - RequestDomain string + Time uint32 `json:"time" category:"tag" sub:"flow_info"` // s + CloseType uint16 `json:"close_type" category:"tag" sub:"flow_info" enumfile:"close_type"` + SignalSource uint16 `json:"signal_source" category:"tag" sub:"capture_info" enumfile:"l4_signal_source"` + FlowID 
uint64 `json:"flow_id" category:"tag" sub:"flow_info"` + TapType uint8 `json:"capture_network_type_id" category:"tag" sub:"capture_info"` + NatSource uint8 `json:"nat_source" category:"tag" sub:"capture_info" enumfile:"nat_source"` + TapPortType uint8 `json:"capture_nic_type" category:"tag" sub:"capture_info" enumfile:"capture_nic_type"` // 0: MAC, 1: IPv4, 2:IPv6, 3: ID + TapPort uint32 `json:"capture_nic" category:"tag" sub:"capture_info"` + TapSide string `json:"observation_point" category:"tag" sub:"capture_info" enumfile:"observation_point"` + VtapID uint16 `json:"agent_id" category:"tag" sub:"capture_info"` + L2End0 bool `json:"l2_end_0" category:"tag" sub:"capture_info"` + L2End1 bool `json:"l2_end_1" category:"tag" sub:"capture_info"` + L3End0 bool `json:"l3_end_0" category:"tag" sub:"capture_info"` + L3End1 bool `json:"l3_end_1" category:"tag" sub:"capture_info"` + StartTime int64 `json:"start_time" category:"tag" sub:"flow_info"` // us + EndTime int64 `json:"end_time" category:"tag" sub:"flow_info"` // us + Duration uint64 `json:"duration" category:"metrics" sub:"delay"` // us + IsNewFlow uint8 `json:"is_new_flow" category:"tag" sub:"flow_info"` + Status uint8 `json:"status" category:"tag" sub:"flow_info" enumfile:"status"` + AclGids []uint16 `json:"acl_gids" category:"tag" sub:"flow_info"` + GPID0 uint32 `json:"gprocess_id_0" category:"tag" sub:"universal_tag"` + GPID1 uint32 `json:"gprocess_id_1" category:"tag" sub:"universal_tag"` + + NatRealIP0 uint32 `json:"nat_real_ip_0" category:"tag" sub:"capture_info" to_string:"IPv4String"` + NatRealIP1 uint32 `json:"nat_real_ip_1" category:"tag" sub:"capture_info" to_string:"IPv4String"` + NatRealPort0 uint16 `json:"nat_real_port_0" category:"tag" sub:"capture_info"` + NatRealPort1 uint16 `json:"nat_real_port_1" category:"tag" sub:"capture_info"` + + DirectionScore uint8 `json:"direction_score" category:"metrics" sub:"l4_throughput"` + RequestDomain string `json:"request_domain" category:"tag" sub:"application_layer"` } var FlowInfoColumns = []*ckdb.Column{ @@ -431,7 +441,7 @@ var FlowInfoColumns = []*ckdb.Column{ } func (f *FlowInfo) WriteBlock(block *ckdb.Block) { - block.WriteDateTime(uint32(f.EndTime / US_TO_S_DEVISOR)) + block.WriteDateTime(f.Time) block.Write( f.CloseType, f.SignalSource, @@ -462,54 +472,54 @@ func (f *FlowInfo) WriteBlock(block *ckdb.Block) { } type Metrics struct { - PacketTx uint64 `json:"packet_tx,omitempty"` - PacketRx uint64 `json:"packet_rx,omitempty"` - ByteTx uint64 `json:"byte_tx,omitempty"` - ByteRx uint64 `json:"byte_rx,omitempty"` - L3ByteTx uint64 `json:"l3_byte_tx,omitempty"` - L3ByteRx uint64 `json:"l3_byte_rx,omitempty"` - L4ByteTx uint64 `json:"l4_byte_tx,omitempty"` - L4ByteRx uint64 `json:"l4_byte_rx,omitempty"` - TotalPacketTx uint64 `json:"total_packet_tx,omitempty"` - TotalPacketRx uint64 `json:"total_packet_rx,omitempty"` - TotalByteTx uint64 `json:"total_byte_tx,omitempty"` - TotalByteRx uint64 `json:"total_byte_rx,omitempty"` - L7Request uint32 `json:"l7_request,omitempty"` - L7Response uint32 `json:"l7_response,omitempty"` - L7ParseFailed uint32 `json:"l7_parse_failed,omitempty"` - - RTT uint32 `json:"rtt,omitempty"` // us - RTTClient uint32 `json:"rtt_client,omitempty"` // us - RTTServer uint32 `json:"rtt_server,omitempty"` // us - TLSRTT uint32 `json:"tls_rtt_sum,omitempty"` // us - - SRTSum uint32 `json:"srt_sum,omitempty"` - ARTSum uint32 `json:"art_sum,omitempty"` - RRTSum uint64 `json:"rrt_sum,omitempty"` - CITSum uint32 `json:"cit_sum,omitempty"` - - SRTCount uint32 
`json:"srt_count,omitempty"` - ARTCount uint32 `json:"art_count,omitempty"` - RRTCount uint32 `json:"rrt_count,omitempty"` - CITCount uint32 `json:"cit_count,omitempty"` - - SRTMax uint32 `json:"srt_max,omitempty"` // us - ARTMax uint32 `json:"art_max,omitempty"` // us - RRTMax uint32 `json:"rrt_max,omitempty"` // us - CITMax uint32 `json:"cit_max,omitempty"` // us - - RetransTx uint32 `json:"retrans_tx,omitempty"` - RetransRx uint32 `json:"retrans_rx,omitempty"` - ZeroWinTx uint32 `json:"zero_win_tx,omitempty"` - ZeroWinRx uint32 `json:"zero_win_rx,omitempty"` - SynCount uint32 `json:"syn_count,omitempty"` - SynackCount uint32 `json:"synack_count,omitempty"` - RetransSyn uint32 `json:"retrans_syn,omitempty"` - RetransSynack uint32 `json:"retrans_synack,omitempty"` - L7ClientError uint32 `json:"l7_client_error,omitempty"` - L7ServerError uint32 `json:"l7_server_error,omitempty"` - L7ServerTimeout uint32 `json:"l7_server_timeout,omitempty"` - L7Error uint32 `json:"l7_error,omitempty"` + PacketTx uint64 `json:"packet_tx" category:"metrics" sub:"l3_throughput"` + PacketRx uint64 `json:"packet_rx" category:"metrics" sub:"l3_throughput"` + ByteTx uint64 `json:"byte_tx" category:"metrics" sub:"l3_throughput"` + ByteRx uint64 `json:"byte_rx" category:"metrics" sub:"l3_throughput"` + L3ByteTx uint64 `json:"l3_byte_tx" category:"metrics" sub:"l3_throughput"` + L3ByteRx uint64 `json:"l3_byte_rx" category:"metrics" sub:"l3_throughput"` + L4ByteTx uint64 `json:"l4_byte_tx" category:"metrics" sub:"l4_throughput"` + L4ByteRx uint64 `json:"l4_byte_rx" category:"metrics" sub:"l4_throughput"` + TotalPacketTx uint64 `json:"total_packet_tx" category:"metrics" sub:"l3_throughput"` + TotalPacketRx uint64 `json:"total_packet_rx" category:"metrics" sub:"l3_throughput"` + TotalByteTx uint64 `json:"total_byte_tx" category:"metrics" sub:"l3_throughput"` + TotalByteRx uint64 `json:"total_byte_rx" category:"metrics" sub:"l3_throughput"` + L7Request uint32 `json:"l7_request" category:"metrics" sub:"application"` + L7Response uint32 `json:"l7_response" category:"metrics" sub:"application"` + L7ParseFailed uint32 `json:"l7_parse_failed" category:"metrics" sub:"application"` + + RTT uint32 `json:"rtt" category:"metrics" sub:"delay"` // us + RTTClient uint32 `json:"rtt_client" category:"metrics" sub:"delay"` // us + RTTServer uint32 `json:"rtt_server" category:"metrics" sub:"delay"` // us + TLSRTT uint32 `json:"tls_rtt_sum" category:"metrics" sub:"delay"` // us + + SRTSum uint32 `json:"srt_sum" category:"metrics" sub:"delay"` + ARTSum uint32 `json:"art_sum" category:"metrics" sub:"delay"` + RRTSum uint64 `json:"rrt_sum" category:"metrics" sub:"delay"` + CITSum uint32 `json:"cit_sum" category:"metrics" sub:"delay"` + + SRTCount uint32 `json:"srt_count" category:"metrics" sub:"delay"` + ARTCount uint32 `json:"art_count" category:"metrics" sub:"delay"` + RRTCount uint32 `json:"rrt_count" category:"metrics" sub:"delay"` + CITCount uint32 `json:"cit_count" category:"metrics" sub:"delay"` + + SRTMax uint32 `json:"srt_max" category:"metrics" sub:"delay"` // us + ARTMax uint32 `json:"art_max" category:"metrics" sub:"delay"` // us + RRTMax uint32 `json:"rrt_max" category:"metrics" sub:"delay"` // us + CITMax uint32 `json:"cit_max" category:"metrics" sub:"delay"` // us + + RetransTx uint32 `json:"retrans_tx" category:"metrics" sub:"tcp_slow"` + RetransRx uint32 `json:"retrans_rx" category:"metrics" sub:"tcp_slow"` + ZeroWinTx uint32 `json:"zero_win_tx" category:"metrics" sub:"tcp_slow"` + ZeroWinRx uint32 `json:"zero_win_rx" 
category:"metrics" sub:"tcp_slow"` + SynCount uint32 `json:"syn_count" category:"metrics" sub:"l4_throughput"` + SynackCount uint32 `json:"synack_count" category:"metrics" sub:"l4_throughput"` + RetransSyn uint32 `json:"retrans_syn" category:"metrics" sub:"tcp_slow"` + RetransSynack uint32 `json:"retrans_synack" category:"metrics" sub:"tcp_slow"` + L7ClientError uint32 `json:"l7_client_error" category:"metrics" sub:"application"` + L7ServerError uint32 `json:"l7_server_error" category:"metrics" sub:"application"` + L7ServerTimeout uint32 `json:"l7_server_timeout" category:"metrics" sub:"application"` + L7Error uint32 `json:"l7_error" category:"metrics" sub:"application"` } var MetricsColumns = []*ckdb.Column{ @@ -897,6 +907,7 @@ func (i *FlowInfo) Fill(f *pb.Flow) { i.StartTime = int64(f.StartTime) / int64(time.Microsecond) i.EndTime = int64(f.EndTime) / int64(time.Microsecond) + i.Time = uint32(f.EndTime / uint64(time.Second)) i.Duration = f.Duration / uint64(time.Microsecond) i.IsNewFlow = uint8(f.IsNewFlow) i.Status = uint8(getStatus(datatype.CloseType(i.CloseType), layers.IPProtocol(f.FlowKey.Proto))) diff --git a/server/ingester/flow_log/log_data/l7_flow_log.go b/server/ingester/flow_log/log_data/l7_flow_log.go index dff1ac4770ef..6d7f02101f0e 100644 --- a/server/ingester/flow_log/log_data/l7_flow_log.go +++ b/server/ingester/flow_log/log_data/l7_flow_log.go @@ -45,48 +45,49 @@ type L7Base struct { // 知识图谱 KnowledgeGraph + Time uint32 `json:"time" category:"tag" sub:"flow_info"` // s // 网络层 - IP40 uint32 `json:"ip4_0"` - IP41 uint32 `json:"ip4_1"` - IP60 net.IP `json:"ip6_0"` - IP61 net.IP `json:"ip6_1"` - IsIPv4 bool `json:"is_ipv4"` - Protocol uint8 + IP40 uint32 `json:"ip4_0" category:"tag" sub:"network_layer" to_string:"IPv4String"` + IP41 uint32 `json:"ip4_1" category:"tag" sub:"network_layer" to_string:"IPv4String"` + IP60 net.IP `json:"ip6_0" category:"tag" sub:"network_layer" to_string:"IPv6String"` + IP61 net.IP `json:"ip6_1" category:"tag" sub:"network_layer" to_string:"IPv6String"` + IsIPv4 bool `json:"is_ipv4" category:"tag" sub:"network_layer"` + Protocol uint8 `json:"protocol" category:"tag" sub:"network_layer" enumfile:"l7_ip_protocol"` // 传输层 - ClientPort uint16 `json:"client_port"` - ServerPort uint16 `json:"server_port"` + ClientPort uint16 `json:"client_port" category:"tag" sub:"transport_layer" ` + ServerPort uint16 `json:"server_port" category:"tag" sub:"transport_layer"` // 流信息 - FlowID uint64 `json:"flow_id"` - TapType uint8 `json:"capture_network_type_id"` - NatSource uint8 `json:"nat_source"` - TapPortType uint8 `json:"capture_nic_type"` - SignalSource uint16 `json:"signal_source"` - TunnelType uint8 `json:"tunnel_type"` - TapPort uint32 `json:"capture_nic"` - TapSide string `json:"observation_point"` - VtapID uint16 `json:"agent_id"` - ReqTcpSeq uint32 `json:"req_tcp_seq"` - RespTcpSeq uint32 `json:"resp_tcp_seq"` - StartTime int64 `json:"start_time"` // us - EndTime int64 `json:"end_time"` // us - GPID0 uint32 - GPID1 uint32 - BizType uint8 - - ProcessID0 uint32 - ProcessID1 uint32 - ProcessKName0 string - ProcessKName1 string - SyscallTraceIDRequest uint64 - SyscallTraceIDResponse uint64 - SyscallThread0 uint32 - SyscallThread1 uint32 - SyscallCoroutine0 uint64 - SyscallCoroutine1 uint64 - SyscallCapSeq0 uint32 - SyscallCapSeq1 uint32 + FlowID uint64 `json:"flow_id" category:"tag" sub:"flow_info"` + TapType uint8 `json:"capture_network_type" category:"tag" sub:"flow_info"` + NatSource uint8 `json:"nat_source" category:"tag" sub:"flow_info" 
enumfile:"nat_source"`
+	TapPortType            uint8  `json:"capture_nic_type" category:"tag" sub:"flow_info"`
+	SignalSource           uint16 `json:"signal_source" category:"tag" sub:"capture_info" enumfile:"l7_signal_source"`
+	TunnelType             uint8  `json:"tunnel_type" category:"tag" sub:"flow_info"`
+	TapPort                uint32 `json:"capture_nic" category:"tag" sub:"capture_info"`
+	TapSide                string `json:"observation_point" category:"tag" sub:"capture_info"`
+	VtapID                 uint16 `json:"agent_id" category:"tag" sub:"capture_info"`
+	ReqTcpSeq              uint32 `json:"req_tcp_seq" category:"tag" sub:"transport_layer"`
+	RespTcpSeq             uint32 `json:"resp_tcp_seq" category:"tag" sub:"transport_layer"`
+	StartTime              int64  `json:"start_time" category:"tag" sub:"flow_info"` // us
+	EndTime                int64  `json:"end_time" category:"tag" sub:"flow_info"` // us
+	GPID0                  uint32 `json:"gprocess_id_0" category:"tag" sub:"universal_tag"`
+	GPID1                  uint32 `json:"gprocess_id_1" category:"tag" sub:"universal_tag"`
+	BizType                uint8  `json:"biz_type" category:"tag" sub:"capture_info"`
+
+	ProcessID0             uint32 `json:"process_id_0" category:"tag" sub:"tracing_info"`
+	ProcessID1             uint32 `json:"process_id_1" category:"tag" sub:"tracing_info"`
+	ProcessKName0          string `json:"process_kname_0" category:"tag" sub:"tracing_info"`
+	ProcessKName1          string `json:"process_kname_1" category:"tag" sub:"tracing_info"`
+	SyscallTraceIDRequest  uint64 `json:"syscall_trace_id_request" category:"tag" sub:"tracing_info"`
+	SyscallTraceIDResponse uint64 `json:"syscall_trace_id_response" category:"tag" sub:"tracing_info"`
+	SyscallThread0         uint32 `json:"syscall_thread_0" category:"tag" sub:"tracing_info"`
+	SyscallThread1         uint32 `json:"syscall_thread_1" category:"tag" sub:"tracing_info"`
+	SyscallCoroutine0      uint64 `json:"syscall_coroutine_0" category:"tag" sub:"tracing_info"`
+	SyscallCoroutine1      uint64 `json:"syscall_coroutine_1" category:"tag" sub:"tracing_info"`
+	SyscallCapSeq0         uint32 `json:"syscall_cap_seq_0" category:"tag" sub:"tracing_info"`
+	SyscallCapSeq1         uint32 `json:"syscall_cap_seq_1" category:"tag" sub:"tracing_info"`
 }
 func L7BaseColumns() []*ckdb.Column {
@@ -145,7 +146,7 @@ func (f *L7Base) WriteBlock(block *ckdb.Block) {
 	f.KnowledgeGraph.WriteBlock(block)
-	block.WriteDateTime(uint32(f.EndTime / US_TO_S_DEVISOR))
+	block.WriteDateTime(f.Time)
 	block.WriteIPv4(f.IP40)
 	block.WriteIPv4(f.IP41)
 	block.WriteIPv6(f.IP60)
@@ -189,57 +190,57 @@ func (f *L7Base) WriteBlock(block *ckdb.Block) {
 type L7FlowLog struct {
 	pool.ReferenceCount
-	_id uint64
+	_id uint64 `json:"_id" category:"tag" sub:"flow_info"`
 	L7Base
-	L7Protocol    uint8
-	L7ProtocolStr string
-	Version       string
-	Type          uint8
-	IsTLS         uint8
+	L7Protocol    uint8  `json:"l7_protocol" category:"tag" sub:"application_layer" enumfile:"l7_protocol"`
+	L7ProtocolStr string `json:"l7_protocol_str" category:"tag" sub:"application_layer"`
+	Version       string `json:"version" category:"tag" sub:"application_layer"`
+	Type          uint8  `json:"type" category:"tag" sub:"application_layer" enumfile:"l7_log_type"`
+	IsTLS         uint8  `json:"is_tls" category:"tag" sub:"application_layer"`
-	RequestType     string
-	RequestDomain   string
-	RequestResource string
-	Endpoint        string
+	RequestType     string `json:"request_type" category:"tag" sub:"application_layer"`
+	RequestDomain   string `json:"request_domain" category:"tag" sub:"application_layer"`
+	RequestResource string `json:"request_resource" category:"tag" sub:"application_layer"`
+	Endpoint        string `json:"end_point" category:"tag" sub:"service_info"`
 	// 数据库nullabled类型的字段, 需使用指针传值写入。如果值无意义,应传递nil.
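+	// (English: fields of database-nullable types must be written via pointer values; pass nil when the value is meaningless.)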
- RequestId *uint64 + RequestId *uint64 `json:"request_id" category:"tag" sub:"application_layer"` requestId uint64 - ResponseStatus uint8 - ResponseCode *int32 + ResponseStatus uint8 `json:"response_status" category:"tag" sub:"application_layer" enumfile:"response_status"` + ResponseCode *int32 `json:"response_code" category:"tag" sub:"application_layer"` responseCode int32 - ResponseException string - ResponseResult string + ResponseException string `json:"response_exception" category:"tag" sub:"application_layer"` + ResponseResult string `json:"response_result" category:"tag" sub:"application_layer"` - HttpProxyClient string - XRequestId0 string - XRequestId1 string - TraceId string + HttpProxyClient string `json:"http_proxy_client" category:"tag" sub:"tracing_info"` + XRequestId0 string `json:"x_request_id_0" category:"tag" sub:"tracing_info"` + XRequestId1 string `json:"x_request_id_1" category:"tag" sub:"tracing_info"` + TraceId string `json:"trace_id" category:"tag" sub:"tracing_info"` TraceIdIndex uint64 - SpanId string - ParentSpanId string + SpanId string `json:"span_id" category:"tag" sub:"tracing_info"` + ParentSpanId string `json:"parent_span_id" category:"tag" sub:"tracing_info"` SpanKind uint8 - spanKind *uint8 - AppService string - AppInstance string + spanKind *uint8 `json:"span_kind" category:"tag" sub:"tracing_info" enumfile:"span_kind"` + AppService string `json:"app_service" category:"tag" sub:"service_info"` + AppInstance string `json:"app_instance" category:"tag" sub:"service_info"` - ResponseDuration uint64 - RequestLength *int64 + ResponseDuration uint64 `json:"response_duration" category:"metrics" sub:"delay"` + RequestLength *int64 `json:"request_length" category:"metrics" sub:"throughput"` requestLength int64 - ResponseLength *int64 + ResponseLength *int64 `json:"response_length" category:"metrics" sub:"throughput"` responseLength int64 - SqlAffectedRows *uint64 + SqlAffectedRows *uint64 `json:"sql_affected_rows" category:"metrics" sub:"throughput"` sqlAffectedRows uint64 - DirectionScore uint8 + DirectionScore uint8 `json:"direction_score" category:"metrics" sub:"throughput"` - AttributeNames []string - AttributeValues []string + AttributeNames []string `json:"attribute_names" category:"tag" sub:"native_tag"` + AttributeValues []string `json:"attribute_values" category:"tag" sub:"native_tag"` - MetricsNames []string - MetricsValues []float64 + MetricsNames []string `json:"metrics_names" category:"tag" sub:"application_layer"` + MetricsValues []float64 `json:"metrics_values" category:"tag" sub:"application_layer"` Events string } @@ -569,6 +570,7 @@ func (b *L7Base) Fill(log *pb.AppProtoLogsData, platformData *grpc.PlatformInfoT b.RespTcpSeq = l.RespTcpSeq b.StartTime = int64(l.StartTime) / int64(time.Microsecond) b.EndTime = int64(l.EndTime) / int64(time.Microsecond) + b.Time = uint32(l.EndTime / uint64(time.Second)) b.GPID0 = l.Gpid_0 b.GPID1 = l.Gpid_1 b.BizType = uint8(l.BizType) diff --git a/server/ingester/flow_log/log_data/otel.go b/server/ingester/flow_log/log_data/otel_import.go similarity index 99% rename from server/ingester/flow_log/log_data/otel.go rename to server/ingester/flow_log/log_data/otel_import.go index 14a154f188f2..4183b78b2f67 100644 --- a/server/ingester/flow_log/log_data/otel.go +++ b/server/ingester/flow_log/log_data/otel_import.go @@ -367,7 +367,7 @@ func (k *KnowledgeGraph) FillOTel(l *L7FlowLog, platformData *grpc.PlatformInfoT // fill Epc0 with the Epc the Vtap belongs to k.L3EpcID0 = platformData.QueryVtapEpc0(l.VtapID) // fill 
in Epc1 with other rules, see function description for details
-		k.L3EpcID1 = platformData.QueryVtapEpc1(l.VtapID, l.IsIPv4, l.IP41, l.IP61)
+		k.L3EpcID1 = platformData.QueryVtapEpc1(l.VtapID, l.IsIPv4, uint32(l.IP41), l.IP61)
 	case "s-app":
 		// fill Epc1 with the Epc the Vtap belongs to
 		k.L3EpcID1 = platformData.QueryVtapEpc0(l.VtapID)
diff --git a/server/ingester/flow_log/exporters/otlp_exporter/otlp.go b/server/ingester/flow_log/log_data/otlp_export.go
similarity index 80%
rename from server/ingester/flow_log/exporters/otlp_exporter/otlp.go
rename to server/ingester/flow_log/log_data/otlp_export.go
index 46a50f528b2f..6798023b6efa 100644
--- a/server/ingester/flow_log/exporters/otlp_exporter/otlp.go
+++ b/server/ingester/flow_log/log_data/otlp_export.go
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-package otlp_exporter
+package log_data
 
 import (
 	crand "crypto/rand"
@@ -32,9 +32,8 @@ import (
 	"github.com/google/gopacket/layers"
 
 	"github.com/deepflowio/deepflow/server/ingester/common"
-	"github.com/deepflowio/deepflow/server/ingester/flow_log/exporters/config"
-	utag "github.com/deepflowio/deepflow/server/ingester/flow_log/exporters/universal_tag"
-	"github.com/deepflowio/deepflow/server/ingester/flow_log/log_data"
+	"github.com/deepflowio/deepflow/server/ingester/exporters/config"
+	utag "github.com/deepflowio/deepflow/server/ingester/exporters/universal_tag"
 	"github.com/deepflowio/deepflow/server/libs/datatype"
 	"github.com/deepflowio/deepflow/server/libs/utils"
 )
@@ -51,56 +50,57 @@ func putIntWithoutZero(attrs pcommon.Map, key string, value int64) {
 	}
 }
 
-func putUniversalTags(attrs pcommon.Map, tags0, tags1 *utag.UniversalTags, dataTypeBits uint32) {
-	if dataTypeBits&config.CLIENT_UNIVERSAL_TAG != 0 {
-		putStrWithoutEmpty(attrs, "df.universal_tag.region_0", tags0.Region)
-		putStrWithoutEmpty(attrs, "df.universal_tag.az_0", tags0.AZ)
-		putStrWithoutEmpty(attrs, "df.universal_tag.host_0", tags0.Host)
-		putStrWithoutEmpty(attrs, "df.universal_tag.vpc_0", tags0.L3Epc)
-		putStrWithoutEmpty(attrs, "df.universal_tag.subnet_0", tags0.Subnet)
-		putStrWithoutEmpty(attrs, "df.universal_tag.pod_cluster_0", tags0.PodCluster)
-		putStrWithoutEmpty(attrs, "df.universal_tag.pod_ns_0", tags0.PodNS)
-		putStrWithoutEmpty(attrs, "df.universal_tag.pod_node_0", tags0.PodNode)
-		putStrWithoutEmpty(attrs, "df.universal_tag.pod_group_0", tags0.PodGroup)
-		putStrWithoutEmpty(attrs, "df.universal_tag.pod_0", tags0.Pod)
-		putStrWithoutEmpty(attrs, "df.universal_tag.service_0", tags0.Service)
-		putStrWithoutEmpty(attrs, "df.universal_tag.chost_0", tags0.CHost)
-		putStrWithoutEmpty(attrs, "df.universal_tag.router_0", tags0.Router)
-		putStrWithoutEmpty(attrs, "df.universal_tag.dhcpgw_0", tags0.DhcpGW)
-		putStrWithoutEmpty(attrs, "df.universal_tag.pod_service_0", tags0.PodService)
-		putStrWithoutEmpty(attrs, "df.universal_tag.redis_0", tags0.Redis)
-		putStrWithoutEmpty(attrs, "df.universal_tag.rds_0", tags0.RDS)
-		putStrWithoutEmpty(attrs, "df.universal_tag.lb_0", tags0.LB)
-		putStrWithoutEmpty(attrs, "df.universal_tag.natgw_0", tags0.NatGW)
-		putStrWithoutEmpty(attrs, "df.universal_tag.auto_instance_type_0", tags0.AutoInstanceType)
-		putStrWithoutEmpty(attrs, "df.universal_tag.auto_instance_0", tags0.AutoInstance)
-		putStrWithoutEmpty(attrs, "df.universal_tag.auto_service_type_0", tags0.AutoServiceType)
-		putStrWithoutEmpty(attrs, "df.universal_tag.auto_service_0", tags0.AutoService)
-	}
-	if dataTypeBits&config.SERVER_UNIVERSAL_TAG != 0 {
-		putStrWithoutEmpty(attrs, "df.universal_tag.region_1", tags1.Region)
-		putStrWithoutEmpty(attrs, "df.universal_tag.az_1", tags1.AZ)
-		putStrWithoutEmpty(attrs, "df.universal_tag.host_1", tags1.Host)
-		putStrWithoutEmpty(attrs, "df.universal_tag.vpc_1", tags1.L3Epc)
-		putStrWithoutEmpty(attrs, "df.universal_tag.subnet_1", tags1.Subnet)
-		putStrWithoutEmpty(attrs, "df.universal_tag.pod_cluster_1", tags1.PodCluster)
-		putStrWithoutEmpty(attrs, "df.universal_tag.pod_ns_1", tags1.PodNS)
-		putStrWithoutEmpty(attrs, "df.universal_tag.pod_node_1", tags1.PodNode)
-		putStrWithoutEmpty(attrs, "df.universal_tag.pod_group_1", tags1.PodGroup)
-		putStrWithoutEmpty(attrs, "df.universal_tag.pod_1", tags1.Pod)
-		putStrWithoutEmpty(attrs, "df.universal_tag.service_1", tags1.Service)
-		putStrWithoutEmpty(attrs, "df.universal_tag.chost_1", tags1.CHost)
-		putStrWithoutEmpty(attrs, "df.universal_tag.router_1", tags1.Router)
-		putStrWithoutEmpty(attrs, "df.universal_tag.dhcpgw_1", tags1.DhcpGW)
-		putStrWithoutEmpty(attrs, "df.universal_tag.pod_service_1", tags1.PodService)
-		putStrWithoutEmpty(attrs, "df.universal_tag.redis_1", tags1.Redis)
-		putStrWithoutEmpty(attrs, "df.universal_tag.rds_1", tags1.RDS)
-		putStrWithoutEmpty(attrs, "df.universal_tag.lb_1", tags1.LB)
-		putStrWithoutEmpty(attrs, "df.universal_tag.natgw_1", tags1.NatGW)
-		putStrWithoutEmpty(attrs, "df.universal_tag.auto_instance_type_1", tags1.AutoInstanceType)
-		putStrWithoutEmpty(attrs, "df.universal_tag.auto_instance_1", tags1.AutoInstance)
-		putStrWithoutEmpty(attrs, "df.universal_tag.auto_service_type_1", tags1.AutoServiceType)
-		putStrWithoutEmpty(attrs, "df.universal_tag.auto_service_1", tags1.AutoService)
+func putUniversalTags(attrs pcommon.Map, tags0, tags1 *utag.UniversalTags, dataTypeBits uint64) {
+	if dataTypeBits&config.UNIVERSAL_TAG != 0 {
+		putStrWithoutEmpty(attrs, "df.universal_tag.region_0", tags0[utag.Region])
+		putStrWithoutEmpty(attrs, "df.universal_tag.az_0", tags0[utag.AZ])
+		putStrWithoutEmpty(attrs, "df.universal_tag.host_0", tags0[utag.Host])
+		putStrWithoutEmpty(attrs, "df.universal_tag.vpc_0", tags0[utag.L3Epc])
+		putStrWithoutEmpty(attrs, "df.universal_tag.subnet_0", tags0[utag.Subnet])
+		putStrWithoutEmpty(attrs, "df.universal_tag.pod_cluster_0", tags0[utag.PodCluster])
+		putStrWithoutEmpty(attrs, "df.universal_tag.pod_ns_0", tags0[utag.PodNS])
+		putStrWithoutEmpty(attrs, "df.universal_tag.pod_node_0", tags0[utag.PodNode])
+		putStrWithoutEmpty(attrs, "df.universal_tag.pod_group_0", tags0[utag.PodGroup])
+		putStrWithoutEmpty(attrs, "df.universal_tag.pod_0", tags0[utag.Pod])
+		putStrWithoutEmpty(attrs, "df.universal_tag.service_0", tags0[utag.Service])
+
+		putStrWithoutEmpty(attrs, "df.universal_tag.chost_0", tags0[utag.CHost])
+		putStrWithoutEmpty(attrs, "df.universal_tag.router_0", tags0[utag.Router])
+		putStrWithoutEmpty(attrs, "df.universal_tag.dhcpgw_0", tags0[utag.DhcpGW])
+		putStrWithoutEmpty(attrs, "df.universal_tag.pod_service_0", tags0[utag.PodService])
+		putStrWithoutEmpty(attrs, "df.universal_tag.redis_0", tags0[utag.Redis])
+		putStrWithoutEmpty(attrs, "df.universal_tag.rds_0", tags0[utag.RDS])
+		putStrWithoutEmpty(attrs, "df.universal_tag.lb_0", tags0[utag.LB])
+
+		putStrWithoutEmpty(attrs, "df.universal_tag.natgw_0", tags0[utag.NatGW])
+		putStrWithoutEmpty(attrs, "df.universal_tag.auto_instance_type_0", tags0[utag.AutoInstanceType])
+		putStrWithoutEmpty(attrs, "df.universal_tag.auto_instance_0", tags0[utag.AutoInstance])
+		putStrWithoutEmpty(attrs, "df.universal_tag.auto_service_type_0", tags0[utag.AutoServiceType])
+		putStrWithoutEmpty(attrs, "df.universal_tag.auto_service_0", tags0[utag.AutoService])
+
+		putStrWithoutEmpty(attrs, "df.universal_tag.region_1", tags1[utag.Region])
+		putStrWithoutEmpty(attrs, "df.universal_tag.az_1", tags1[utag.AZ])
+		putStrWithoutEmpty(attrs, "df.universal_tag.host_1", tags1[utag.Host])
+		putStrWithoutEmpty(attrs, "df.universal_tag.vpc_1", tags1[utag.L3Epc])
+		putStrWithoutEmpty(attrs, "df.universal_tag.subnet_1", tags1[utag.Subnet])
+		putStrWithoutEmpty(attrs, "df.universal_tag.pod_cluster_1", tags1[utag.PodCluster])
+		putStrWithoutEmpty(attrs, "df.universal_tag.pod_ns_1", tags1[utag.PodNS])
+		putStrWithoutEmpty(attrs, "df.universal_tag.pod_node_1", tags1[utag.PodNode])
+		putStrWithoutEmpty(attrs, "df.universal_tag.pod_group_1", tags1[utag.PodGroup])
+		putStrWithoutEmpty(attrs, "df.universal_tag.pod_1", tags1[utag.Pod])
+		putStrWithoutEmpty(attrs, "df.universal_tag.service_1", tags1[utag.Service])
+		putStrWithoutEmpty(attrs, "df.universal_tag.chost_1", tags1[utag.CHost])
+		putStrWithoutEmpty(attrs, "df.universal_tag.router_1", tags1[utag.Router])
+		putStrWithoutEmpty(attrs, "df.universal_tag.dhcpgw_1", tags1[utag.DhcpGW])
+		putStrWithoutEmpty(attrs, "df.universal_tag.pod_service_1", tags1[utag.PodService])
+		putStrWithoutEmpty(attrs, "df.universal_tag.redis_1", tags1[utag.Redis])
+		putStrWithoutEmpty(attrs, "df.universal_tag.rds_1", tags1[utag.RDS])
+		putStrWithoutEmpty(attrs, "df.universal_tag.lb_1", tags1[utag.LB])
+		putStrWithoutEmpty(attrs, "df.universal_tag.natgw_1", tags1[utag.NatGW])
+		putStrWithoutEmpty(attrs, "df.universal_tag.auto_instance_type_1", tags1[utag.AutoInstanceType])
+		putStrWithoutEmpty(attrs, "df.universal_tag.auto_instance_1", tags1[utag.AutoInstance])
+		putStrWithoutEmpty(attrs, "df.universal_tag.auto_service_type_1", tags1[utag.AutoServiceType])
+		putStrWithoutEmpty(attrs, "df.universal_tag.auto_service_1", tags1[utag.AutoService])
 	}
 }
 
@@ -121,16 +121,17 @@ func putK8sLabels(attrs pcommon.Map, podID uint32, universalTagsManager *utag.Un
 	}
 }
 
-func L7FlowLogToExportResourceSpans(l7 *log_data.L7FlowLog, universalTagsManager *utag.UniversalTagsManager, dataTypeBits uint32, resSpan ptrace.ResourceSpans) {
-	tags0, tags1 := universalTagsManager.QueryUniversalTags(l7)
-
+func (l7 *L7FlowLog) EncodeToOtlp(utags *utag.UniversalTagsManager, dataTypeBits uint64) interface{} {
+	spanSlice := ptrace.NewResourceSpansSlice()
+	resSpan := spanSlice.AppendEmpty()
+	tags0, tags1 := l7.QueryUniversalTags(utags)
 	resAttrs := resSpan.Resource().Attributes()
 	putUniversalTags(resAttrs, tags0, tags1, dataTypeBits)
 	if dataTypeBits&config.K8S_LABEL != 0 && l7.PodID0 != 0 {
-		putK8sLabels(resAttrs, l7.PodID0, universalTagsManager, "_0")
+		putK8sLabels(resAttrs, l7.PodID0, utags, "_0")
 	}
 	if dataTypeBits&config.K8S_LABEL != 0 && l7.PodID1 != 0 {
-		putK8sLabels(resAttrs, l7.PodID1, universalTagsManager, "_1")
+		putK8sLabels(resAttrs, l7.PodID1, utags, "_1")
 	}
 
 	span := resSpan.ScopeSpans().AppendEmpty().Spans().AppendEmpty()
@@ -181,11 +182,11 @@ func L7FlowLogToExportResourceSpans(l7 *log_data.L7FlowLog, universalTagsManager
 	if dataTypeBits&config.SERVICE_INFO != 0 {
 		if isClientSide(l7.TapSide) {
-			putStrWithoutEmpty(resAttrs, "service.name", tags0.AutoService)
-			putStrWithoutEmpty(resAttrs, "service.instance.id", tags0.AutoInstance)
+			putStrWithoutEmpty(resAttrs, "service.name", tags0[utag.AutoService])
+			putStrWithoutEmpty(resAttrs, "service.instance.id", tags0[utag.AutoInstance])
 		} else {
-			putStrWithoutEmpty(resAttrs, "service.name", tags1.AutoService)
-			putStrWithoutEmpty(resAttrs, "service.instance.id", tags1.AutoInstance)
+			putStrWithoutEmpty(resAttrs, "service.name", tags1[utag.AutoService])
+			putStrWithoutEmpty(resAttrs, "service.instance.id", tags1[utag.AutoInstance])
 		}
 		// if l7.AppService/l7.AppInstance is not empty, overwrite the value
 		putStrWithoutEmpty(resAttrs, "service.name", l7.AppService)
@@ -210,9 +211,10 @@ func L7FlowLogToExportResourceSpans(l7 *log_data.L7FlowLog, universalTagsManager
 		putStrWithoutEmpty(resAttrs, "df.capture_info.nat_source", datatype.NATSource(l7.NatSource).String())
 		putStrWithoutEmpty(resAttrs, "df.capture_info.capture_nic", datatype.TapPort(l7.TapPort).String())
 		putStrWithoutEmpty(resAttrs, "df.capture_info.capture_nic_type", tapPortTypeToString(l7.TapPortType))
-		putStrWithoutEmpty(resAttrs, "df.capture_info.capture_nic_name", tags0.TapPortName)
+		// TODO: support TapPortName
+		// putStrWithoutEmpty(resAttrs, "df.capture_info.capture_nic_name", tags0.TapPortName)
 		putStrWithoutEmpty(resAttrs, "df.capture_info.observation_point", tapSideToName(l7.TapSide))
-		putStrWithoutEmpty(resAttrs, "df.capture_info.agent", tags0.Vtap)
+		putStrWithoutEmpty(resAttrs, "df.capture_info.agent", tags0[utag.Vtap])
 	}
 
 	if dataTypeBits&config.NETWORK_LAYER != 0 {
@@ -220,6 +222,7 @@ func L7FlowLogToExportResourceSpans(l7 *log_data.L7FlowLog, universalTagsManager
 		resAttrs.PutBool("df.network.is_internet_0", l7.L3EpcID0 == datatype.EPC_FROM_INTERNET)
 		resAttrs.PutBool("df.network.is_internet_1", l7.L3EpcID1 == datatype.EPC_FROM_INTERNET)
 		if l7.IsIPv4 {
 			resAttrs.PutStr("df.network.ip_0", utils.IpFromUint32(l7.IP40).String())
 			resAttrs.PutStr("df.network.ip_1", utils.IpFromUint32(l7.IP41).String())
 		} else {
@@ -293,6 +296,7 @@ func L7FlowLogToExportResourceSpans(l7 *log_data.L7FlowLog, universalTagsManager
 			spanAttrs.PutDouble(l7.MetricsNames[i], l7.MetricsValues[i])
 		}
 	}
+	return spanSlice
 }
 
 func getTraceID(traceID string, id uint64) pcommon.TraceID {
@@ -333,11 +337,11 @@ func newSpanId() pcommon.SpanID {
 }
 
 // use server info (_1) to fill in 'host' information, use client info (_0) to fill in 'peer' information
-func setServerSpanKindHostAndPeer(spanAttrs pcommon.Map, l7 *log_data.L7FlowLog, tags0, tags1 *utag.UniversalTags) {
-	if tags1.CHost != "" {
-		putStrWithoutEmpty(spanAttrs, "net.host.name", tags1.CHost)
+func setServerSpanKindHostAndPeer(spanAttrs pcommon.Map, l7 *L7FlowLog, tags0, tags1 *utag.UniversalTags) {
+	if tags1[utag.CHost] != "" {
+		putStrWithoutEmpty(spanAttrs, "net.host.name", tags1[utag.CHost])
 	} else {
-		putStrWithoutEmpty(spanAttrs, "net.host.name", tags1.PodNode)
+		putStrWithoutEmpty(spanAttrs, "net.host.name", tags1[utag.PodNode])
 	}
 	putIntWithoutZero(spanAttrs, "net.host.port", int64(l7.ServerPort))
 	if l7.IsIPv4 {
@@ -350,10 +354,10 @@ func setServerSpanKindHostAndPeer(spanAttrs pcommon.Map, l7 *log_data.L7FlowLog,
 		}
 	}
 
-	if tags0.CHost != "" {
-		putStrWithoutEmpty(spanAttrs, "net.peer.name", tags0.CHost)
+	if tags0[utag.CHost] != "" {
+		putStrWithoutEmpty(spanAttrs, "net.peer.name", tags0[utag.CHost])
 	} else {
-		putStrWithoutEmpty(spanAttrs, "net.peer.name", tags0.PodNode)
+		putStrWithoutEmpty(spanAttrs, "net.peer.name", tags0[utag.PodNode])
 	}
 	putIntWithoutZero(spanAttrs, "net.peer.port", int64(l7.ClientPort))
 	if l7.IsIPv4 {
@@ -368,11 +372,11 @@ func setServerSpanKindHostAndPeer(spanAttrs pcommon.Map, l7 *log_data.L7FlowLog,
 	}
 }
 
 // use client info (_0) to fill in 'host' information, use server info (_1) to fill in 'peer' information
-func setOtherSpanKindHostAndPeer(spanAttrs pcommon.Map, l7 *log_data.L7FlowLog, tags0, tags1 *utag.UniversalTags) {
-	if tags0.CHost != "" {
-		putStrWithoutEmpty(spanAttrs, "net.host.name", tags0.CHost)
+func setOtherSpanKindHostAndPeer(spanAttrs pcommon.Map, l7 *L7FlowLog, tags0, tags1 *utag.UniversalTags) {
+	if tags0[utag.CHost] != "" {
+		putStrWithoutEmpty(spanAttrs, "net.host.name", tags0[utag.CHost])
 	} else {
-		putStrWithoutEmpty(spanAttrs, "net.host.name", tags0.PodNode)
+		putStrWithoutEmpty(spanAttrs, "net.host.name", tags0[utag.PodNode])
 	}
 	putIntWithoutZero(spanAttrs, "net.host.port", int64(l7.ClientPort))
 	if l7.IsIPv4 {
@@ -385,10 +389,10 @@ func setOtherSpanKindHostAndPeer(spanAttrs pcommon.Map, l7 *log_data.L7FlowLog,
 		}
 	}
 
-	if tags1.CHost != "" {
-		putStrWithoutEmpty(spanAttrs, "net.peer.name", tags1.CHost)
+	if tags1[utag.CHost] != "" {
+		putStrWithoutEmpty(spanAttrs, "net.peer.name", tags1[utag.CHost])
 	} else {
-		putStrWithoutEmpty(spanAttrs, "net.peer.name", tags1.PodNode)
+		putStrWithoutEmpty(spanAttrs, "net.peer.name", tags1[utag.PodNode])
 	}
 	putIntWithoutZero(spanAttrs, "net.peer.port", int64(l7.ServerPort))
 	if l7.IsIPv4 {
@@ -402,7 +406,7 @@ func setOtherSpanKindHostAndPeer(spanAttrs pcommon.Map, l7 *log_data.L7FlowLog,
 	}
 }
 
-func setDNS(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog) {
+func setDNS(span *ptrace.Span, spanAttrs pcommon.Map, l7 *L7FlowLog) {
 	putStrWithoutEmpty(spanAttrs, "df.dns.request_type", l7.RequestType)
 	putStrWithoutEmpty(spanAttrs, "df.dns.request_resource", l7.RequestResource)
 	if l7.RequestId != nil {
@@ -420,7 +424,7 @@ func setDNS(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog) {
 	}
 }
 
-func setHTTP(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog) {
+func setHTTP(span *ptrace.Span, spanAttrs pcommon.Map, l7 *L7FlowLog) {
 	putStrWithoutEmpty(spanAttrs, "http.flavor", l7.Version)
 	putStrWithoutEmpty(spanAttrs, "http.method", l7.RequestType)
 	putStrWithoutEmpty(spanAttrs, "net.peer.name", l7.RequestDomain)
@@ -438,7 +442,7 @@ func setHTTP(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog) {
 	span.SetName(strings.Join([]string{l7.RequestType, l7.RequestResource}, " "))
 }
 
-func setDubbo(span *ptrace.Span, spanAttrs, resAttrs pcommon.Map, l7 *log_data.L7FlowLog) {
+func setDubbo(span *ptrace.Span, spanAttrs, resAttrs pcommon.Map, l7 *L7FlowLog) {
 	spanAttrs.PutStr("rpc.system", "apache_dubbo")
 	putStrWithoutEmpty(spanAttrs, "rpc.service", l7.RequestResource)
 	putStrWithoutEmpty(spanAttrs, "rpc.method", l7.RequestType)
@@ -457,7 +461,7 @@ func setDubbo(span *ptrace.Span, spanAttrs, resAttrs pcommon.Map, l7 *log_data.L
 	}
 }
 
-func setGRPC(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog) {
+func setGRPC(span *ptrace.Span, spanAttrs pcommon.Map, l7 *L7FlowLog) {
 	spanAttrs.PutStr("rpc.system", "grpc")
 	putStrWithoutEmpty(spanAttrs, "rpc.service", l7.RequestResource)
 	putStrWithoutEmpty(spanAttrs, "rpc.method", l7.RequestType)
@@ -473,7 +477,7 @@ func setGRPC(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog) {
 	}
 }
 
-func setKafka(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog) {
+func setKafka(span *ptrace.Span, spanAttrs pcommon.Map, l7 *L7FlowLog) {
 	spanAttrs.PutStr("messaging.system", "kafka")
 	span.SetName(l7.RequestResource)
 
@@ -491,7 +495,7 @@ func setKafka(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog)
 	}
 }
 
-func setMQTT(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog) {
+func setMQTT(span *ptrace.Span, spanAttrs pcommon.Map, l7 *L7FlowLog) {
 	spanAttrs.PutStr("messaging.system", "mqtt")
 	span.SetName(l7.RequestResource)
 
@@ -506,7 +510,7 @@ func setMQTT(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog) {
 	}
 }
 
-func setMySQL(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog) {
+func setMySQL(span *ptrace.Span, spanAttrs pcommon.Map, l7 *L7FlowLog) {
 	spanName, operation := getSQLSpanNameAndOperation(l7.RequestResource)
 	putStrWithoutEmpty(spanAttrs, "db.system", "mysql")
 	putStrWithoutEmpty(spanAttrs, "db.operation", operation)
@@ -519,7 +523,7 @@ func setMySQL(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog)
 	span.SetName(spanName)
 }
 
-func setPostgreSQL(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog) {
+func setPostgreSQL(span *ptrace.Span, spanAttrs pcommon.Map, l7 *L7FlowLog) {
 	spanName, operation := getSQLSpanNameAndOperation(l7.RequestResource)
 	putStrWithoutEmpty(spanAttrs, "db.system", "postgresql")
 	putStrWithoutEmpty(spanAttrs, "db.operation", operation)
@@ -531,7 +535,7 @@ func setPostgreSQL(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7Flow
 	span.SetName(spanName)
 }
 
-func setRedis(span *ptrace.Span, spanAttrs pcommon.Map, l7 *log_data.L7FlowLog) {
+func setRedis(span *ptrace.Span, spanAttrs pcommon.Map, l7 *L7FlowLog) {
 	putStrWithoutEmpty(spanAttrs, "db.system", "redis")
 	putStrWithoutEmpty(spanAttrs, "db.operation", l7.RequestType)
 	putStrWithoutEmpty(spanAttrs, "db.statement", l7.RequestResource)
diff --git a/server/ingester/flow_log/exporters/otlp_exporter/otlp_test.go b/server/ingester/flow_log/log_data/otlp_export_test.go
similarity index 98%
rename from server/ingester/flow_log/exporters/otlp_exporter/otlp_test.go
rename to server/ingester/flow_log/log_data/otlp_export_test.go
index b23489ee6ad4..1f84e2e5a496 100644
--- a/server/ingester/flow_log/exporters/otlp_exporter/otlp_test.go
+++ b/server/ingester/flow_log/log_data/otlp_export_test.go
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-package otlp_exporter
+package log_data
 
 import "testing"
diff --git a/server/ingester/flow_log/exporters/otlp_exporter/util.go b/server/ingester/flow_log/log_data/otlp_utils.go
similarity index 99%
rename from server/ingester/flow_log/exporters/otlp_exporter/util.go
rename to server/ingester/flow_log/log_data/otlp_utils.go
index 8e184e570f29..3d9911acee9b 100644
--- a/server/ingester/flow_log/exporters/otlp_exporter/util.go
+++ b/server/ingester/flow_log/log_data/otlp_utils.go
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-package otlp_exporter
+package log_data
 
 import (
 	"bytes"
diff --git a/server/ingester/flow_metrics/config/config.go b/server/ingester/flow_metrics/config/config.go
index 853fc887a863..715c51bd3ffb 100644
--- a/server/ingester/flow_metrics/config/config.go
+++ b/server/ingester/flow_metrics/config/config.go
@@ -52,22 +52,10 @@ type FlowMetricsTTL struct {
 	VtapApp1S int `yaml:"vtap-app-1s"`
 }
 
-type PromWriterConfig struct {
-	Enabled       bool              `yaml:"enabled"`
-	Endpoint      string            `yaml:"endpoint"`
-	Headers       map[string]string `yaml:"headers"`
-	BatchSize     int               `yaml:"batch-size"`
-	FlushTimeout  int               `yaml:"flush-timeout"`
-	QueueCount    int               `yaml:"queue-count"`
-	QueueSize     int               `yaml:"queue-size"`
-	MetricsFilter []string          `yaml:"metrics-filter"`
-}
-
 type Config struct {
 	Base                 *config.Config
 	CKReadTimeout        int                   `yaml:"ck-read-timeout"`
 	CKWriterConfig       config.CKWriterConfig `yaml:"metrics-ck-writer"`
-	PromWriterConfig     PromWriterConfig      `yaml:"metrics-prom-writer"`
 	DisableSecondWrite   bool                  `yaml:"disable-second-write"`
 	UnmarshallQueueCount int                   `yaml:"unmarshall-queue-count"`
 	UnmarshallQueueSize  int                   `yaml:"unmarshall-queue-size"`
@@ -100,21 +88,6 @@ func (c *Config) Validate() error {
 		c.FlowMetricsTTL.VtapApp1S = DefaultFlowMetrics1STTL
 	}
 
-	if c.PromWriterConfig.QueueCount <= 0 {
-		c.PromWriterConfig.QueueCount = DefaultPromWriterQueueCount
-	}
-	if c.PromWriterConfig.QueueSize <= 0 {
-		c.PromWriterConfig.QueueCount = DefaultPromWriterQueueSize
-	}
-
-	if c.PromWriterConfig.BatchSize <= 0 {
-		c.PromWriterConfig.BatchSize = DefaultPromWriterBatchSize
-	}
-
-	if c.PromWriterConfig.FlushTimeout <= 0 {
-		c.PromWriterConfig.FlushTimeout = DefaultPromWriterFlushTimeout
-	}
-
 	return nil
 }
 
@@ -123,7 +96,6 @@ func Load(base *config.Config, path string) *Config {
 		FlowMetrics: Config{
 			Base:                 base,
 			CKWriterConfig:       config.CKWriterConfig{QueueCount: 1, QueueSize: 1000000, BatchSize: 512000, FlushTimeout: 10},
-			PromWriterConfig:     PromWriterConfig{},
 			CKReadTimeout:        DefaultCKReadTimeout,
 			UnmarshallQueueCount: DefaultUnmarshallQueueCount,
 			UnmarshallQueueSize:  DefaultUnmarshallQueueSize,
diff --git a/server/ingester/flow_metrics/dbwriter/dbwriter.go b/server/ingester/flow_metrics/dbwriter/dbwriter.go
index ceaaebec2ae9..854277cfdddc 100644
--- a/server/ingester/flow_metrics/dbwriter/dbwriter.go
+++ b/server/ingester/flow_metrics/dbwriter/dbwriter.go
@@ -17,17 +17,6 @@
 package dbwriter
 
 import (
-	"bytes"
-	"context"
-	"fmt"
-	"io"
-	"net/http"
-	"strconv"
-	"sync/atomic"
-	"time"
-
-	"github.com/gogo/protobuf/proto"
-	"github.com/golang/snappy"
 	logging "github.com/op/go-logging"
 
 	"github.com/deepflowio/deepflow/server/ingester/common"
@@ -36,11 +25,7 @@ import (
 	"github.com/deepflowio/deepflow/server/ingester/pkg/ckwriter"
 	"github.com/deepflowio/deepflow/server/libs/app"
 	"github.com/deepflowio/deepflow/server/libs/ckdb"
-	"github.com/deepflowio/deepflow/server/libs/datatype/prompb"
 	flow_metrics "github.com/deepflowio/deepflow/server/libs/flow-metrics"
-	"github.com/deepflowio/deepflow/server/libs/pool"
-	"github.com/deepflowio/deepflow/server/libs/queue"
-	"github.com/deepflowio/deepflow/server/libs/stats"
 )
 
 var log = logging.MustGetLogger("flow_metrics.dbwriter")
@@ -95,7 +80,7 @@ func (w *CkDbWriter) Put(items ...interface{}) error {
 		caches[i] = make([]interface{}, 0, CACHE_SIZE)
 	}
 	for _, item := range items {
-		doc, ok := item.(*app.Document)
+		doc, ok := item.(app.Document)
 		if !ok {
 			log.Warningf("receive wrong type data %v", item)
 			continue
@@ -121,250 +106,3 @@ func (w *CkDbWriter) Close() {
 		ckwriter.Close()
 	}
 }
-
-type PromWriterCounter struct {
-	RecvMetricsCount    int64 `statsd:"recv-metrics-count"`
-	RecvTimeSeriesCount int64 `statsd:"recv-timeseries-count"`
-
-	SendFailedCount     int64 `statsd:"send-failed-count"`
-	SendSucceedCount    int64 `statsd:"send-succeed-count"`
-	SendTimeSeriesCount int64 `statsd:"send-timeseries-count"`
-}
-
-// PromWriter is the db.Writer implementation for Prometheus remote write; it pushes metrics data to the remote endpoint
-type PromWriter struct {
-	ctx    context.Context
-	cancel context.CancelFunc
-
-	conf       flowmetricsconfig.PromWriterConfig
-	client     *http.Client
-	queues     queue.FixedMultiQueue
-	queueCount int
-	filter     map[string]struct{}
-	seq        int32
-	closed     bool
-	counter    *PromWriterCounter
-}
-
-func (pw *PromWriter) GetCounter() interface{} {
-	var counter *PromWriterCounter
-	counter, pw.counter = pw.counter, &PromWriterCounter{}
-	return counter
-}
-
-func (pw *PromWriter) Closed() bool {
-	return pw.closed
-}
-
-func NewPromWriter(conf flowmetricsconfig.PromWriterConfig) *PromWriter {
-	ctx, cancel := context.WithCancel(context.Background())
-	filter := make(map[string]struct{})
-	for _, m := range conf.MetricsFilter {
-		filter[m] = struct{}{}
-	}
-	queues := queue.NewOverwriteQueues(
-		"prometheus_remotewrite", uint8(conf.QueueCount), conf.QueueSize,
-		queue.OptionFlushIndicator(time.Duration(conf.FlushTimeout)*time.Second),
-		queue.OptionRelease(func(p interface{}) { ReleasePrompbTimeSeries(p.(*prompb.TimeSeries)) }),
-		common.QUEUE_STATS_MODULE_INGESTER)
-	pw := &PromWriter{
-		ctx:        ctx,
-		cancel:     cancel,
-		conf:       conf,
-		client:     &http.Client{Timeout: time.Second * 10},
-		queues:     queues,
-		queueCount: conf.QueueCount,
-		filter:     filter,
-		counter:    &PromWriterCounter{},
-	}
-	common.RegisterCountableForIngester("prom_writer", pw, stats.OptionStatTags{"queue_count": strconv.Itoa(int(conf.QueueCount))})
-
-	for i := 0; i < conf.QueueCount; i++ {
-		go pw.loopConsume(i)
-	}
-	return pw
-}
-
-// Put may be called from multiple threads
-func (pw *PromWriter) Put(items ...interface{}) error {
-	atomic.AddInt32(&pw.seq, 1)
-	var timeSeries []interface{}
-	for _, item := range items {
-		doc, ok := item.(*app.Document)
-		if !ok {
-			log.Warningf("receive wrong type data %v", item)
-			continue
-		}
-
-		id, err := doc.TableID()
-		if err != nil {
-			log.Warningf("doc table id not found, %v", err)
-			doc.Release()
-			continue
-		}
-
-		// only the APPLICATION_MAP_1S table is handled
-		if id != uint8(flow_metrics.APPLICATION_MAP_1S) {
-			doc.Release()
-			continue
-		}
-		t := int64(doc.Timestamp) * 1000 // convert to ms
-
-		var metrics map[string]float64
-		// TODO: the remaining metrics types are not yet implemented
-		if doc.Meter != nil {
-			switch meter := doc.Meter.(type) {
-			case *flow_metrics.AppMeter:
-				if _, ok := pw.filter[metricsFilterApp]; ok {
-					metrics = flow_metrics.EncodeAppMeterToMetrics(meter)
-				}
-			}
-		}
-
-		// skip label matching when there are no metrics
-		if len(metrics) <= 0 {
-			doc.Release()
-			continue
-		}
-
-		var labels []prompb.Label
-		if doc.Tagger != nil {
-			switch tag := doc.Tagger.(type) {
-			case *flow_metrics.MiniTag:
-				labels = flow_metrics.EncodeMiniTagToPromLabels(tag)
-			case *flow_metrics.CustomTag:
-				labels = flow_metrics.EncodeCustomTagToPromLabels(tag)
-			case *flow_metrics.Tag:
-				labels = flow_metrics.EncodeTagToPromLabels(tag)
-			}
-		}
-
-		pw.counter.RecvMetricsCount++
-		for metric, value := range metrics {
-			ts := AcquirePrompbTimeSeries()
-			ts.Labels = append(ts.Labels, labels...)
-			ts.Labels = append(ts.Labels, prompb.Label{
-				Name:  "__name__",
-				Value: metric,
-			})
-			ts.Samples[0].Value = value
-			ts.Samples[0].Timestamp = t
-			timeSeries = append(timeSeries, ts)
-		}
-		doc.Release()
-	}
-
-	if len(timeSeries) > 0 {
-		pw.counter.RecvTimeSeriesCount += int64(len(timeSeries))
-		pw.queues.Put(queue.HashKey(int(pw.seq)%pw.queueCount), timeSeries...)
-	}
-	return nil
-}
-
-func (pw *PromWriter) Close() {
-	pw.closed = true
-	pw.cancel()
-}
-
-func (pw *PromWriter) loopConsume(queueId int) {
-	batch := make([]prompb.TimeSeries, 0, pw.conf.BatchSize)
-	releaseCache := make([]*prompb.TimeSeries, 0, pw.conf.BatchSize)
-	doReq := func() {
-		if len(batch) == 0 {
-			return
-		}
-		if err := pw.sendRequest(&prompb.WriteRequest{Timeseries: batch}); err != nil {
-			if pw.counter.SendFailedCount == 0 {
-				log.Warningf("failed to send promrw request, err: %v", err)
-			}
-			pw.counter.SendFailedCount++
-		} else {
-			pw.counter.SendSucceedCount++
-			pw.counter.SendTimeSeriesCount += int64(len(batch))
-		}
-		batch = batch[:0]
-		for _, ts := range releaseCache {
-			ReleasePrompbTimeSeries(ts)
-		}
-		releaseCache = releaseCache[:0]
-	}
-
-	queueTimeSeries := make([]interface{}, QUEUE_BATCH_SIZE)
-	for !pw.closed {
-		n := pw.queues.Gets(queue.HashKey(queueId), queueTimeSeries)
-		for _, value := range queueTimeSeries[:n] {
-			if value == nil {
-				doReq()
-				continue
-			}
-			if ts, ok := value.(*prompb.TimeSeries); ok {
-				batch = append(batch, *ts)
-				releaseCache = append(releaseCache, ts)
-				if len(batch) >= pw.conf.BatchSize {
-					doReq()
-				}
-			} else {
-				log.Warningf("get prom remote write queue data type wrong")
-			}
-		}
-	}
-}
-
-func (pw *PromWriter) sendRequest(wr *prompb.WriteRequest) error {
-	data, err := proto.Marshal(wr)
-	if err != nil {
-		return err
-	}
-	buf := make([]byte, len(data), cap(data))
-	compressedData := snappy.Encode(buf, data)
-
-	req, err := http.NewRequestWithContext(pw.ctx, "POST", pw.conf.Endpoint, bytes.NewReader(compressedData))
-	if err != nil {
-		return err
-	}
-
-	// Add necessary headers specified by:
-	// https://cortexmetrics.io/docs/apis/#remote-api
-	req.Header.Add("Content-Encoding", "snappy")
-	req.Header.Set("Content-Type", "application/x-protobuf")
-	req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
-
-	// inject extra headers
-	for k, v := range pw.conf.Headers {
-		req.Header.Set(k, v)
-	}
-
-	resp, err := http.DefaultClient.Do(req)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-
-	// 5xx errors are recoverable and the writer should retry?
-	// Reference for different behavior according to status code:
-	// https://github.com/prometheus/prometheus/pull/2552/files#diff-ae8db9d16d8057358e49d694522e7186
-	body, err := io.ReadAll(io.LimitReader(resp.Body, 256))
-	if resp.StatusCode >= 500 && resp.StatusCode < 600 {
-		return fmt.Errorf("remote write returned HTTP status %v; err = %w: %s", resp.Status, err, body)
-	}
-
-	return nil
-}
-
-var prompbTimeSeriesPool = pool.NewLockFreePool(func() interface{} {
-	return &prompb.TimeSeries{
-		Samples: make([]prompb.Sample, 1),
-	}
-})
-
-func AcquirePrompbTimeSeries() *prompb.TimeSeries {
-	return prompbTimeSeriesPool.Get().(*prompb.TimeSeries)
-}
-
-func ReleasePrompbTimeSeries(t *prompb.TimeSeries) {
-	if t == nil {
-		return
-	}
-	t.Labels = t.Labels[:0]
-	prompbTimeSeriesPool.Put(t)
-}
diff --git a/server/ingester/flow_metrics/flow_metrics/flow_metrics.go b/server/ingester/flow_metrics/flow_metrics/flow_metrics.go
index 5d6cfc40f23d..4937aaa94faa 100644
--- a/server/ingester/flow_metrics/flow_metrics/flow_metrics.go
+++ b/server/ingester/flow_metrics/flow_metrics/flow_metrics.go
@@ -24,6 +24,7 @@ import (
 	logging "github.com/op/go-logging"
 
 	"github.com/deepflowio/deepflow/server/ingester/droplet/queue"
+	"github.com/deepflowio/deepflow/server/ingester/exporters"
 	"github.com/deepflowio/deepflow/server/ingester/flow_metrics/config"
 	"github.com/deepflowio/deepflow/server/ingester/flow_metrics/dbwriter"
 	"github.com/deepflowio/deepflow/server/ingester/flow_metrics/unmarshaller"
@@ -40,10 +41,11 @@ var log = logging.MustGetLogger("flow_metrics")
 type FlowMetrics struct {
 	unmarshallers []*unmarshaller.Unmarshaller
 	platformDatas []*grpc.PlatformInfoTable
-	dbwriters     []dbwriter.DbWriter
+	dbwriter      dbwriter.DbWriter
+	exporters     *exporters.Exporters
 }
 
-func NewFlowMetrics(cfg *config.Config, recv *receiver.Receiver, platformDataManager *grpc.PlatformDataManager) (*FlowMetrics, error) {
+func NewFlowMetrics(cfg *config.Config, recv *receiver.Receiver, platformDataManager *grpc.PlatformDataManager, exporters *exporters.Exporters) (*FlowMetrics, error) {
 	flowMetrics := FlowMetrics{}
 
 	manager := queue.NewManager(ingesterctl.INGESTERCTL_FLOW_METRICS_QUEUE)
@@ -57,21 +59,15 @@ func NewFlowMetrics(cfg *config.Config, recv *receiver.Receiver, platformDataMan
 	recv.RegistHandler(datatype.MESSAGE_TYPE_METRICS, unmarshallQueues, unmarshallQueueCount)
 
 	var err error
-	var writers []dbwriter.DbWriter
 	ckWriter, err := dbwriter.NewCkDbWriter(cfg.Base.CKDB.ActualAddrs, cfg.Base.CKDBAuth.Username, cfg.Base.CKDBAuth.Password,
 		cfg.Base.CKDB.ClusterName, cfg.Base.CKDB.StoragePolicy, cfg.Base.CKDB.TimeZone, cfg.CKWriterConfig, cfg.FlowMetricsTTL, cfg.Base.GetCKDBColdStorages(), cfg.Base.CKDB.Watcher)
 	if err != nil {
 		log.Error(err)
 		return nil, err
 	}
-	writers = append(writers, ckWriter)
-	if cfg.PromWriterConfig.Enabled {
-		writer := dbwriter.NewPromWriter(cfg.PromWriterConfig)
-		writers = append(writers, writer)
-	}
-
-	flowMetrics.dbwriters = writers
+	flowMetrics.dbwriter = ckWriter
+	flowMetrics.exporters = exporters
 
 	flowMetrics.unmarshallers = make([]*unmarshaller.Unmarshaller, unmarshallQueueCount)
 	flowMetrics.platformDatas = make([]*grpc.PlatformInfoTable, unmarshallQueueCount)
 	for i := 0; i < unmarshallQueueCount; i++ {
@@ -85,7 +81,7 @@ func NewFlowMetrics(cfg *config.Config, recv *receiver.Receiver, platformDataMan
 		if err != nil {
 			return nil, err
 		}
-		flowMetrics.unmarshallers[i] = unmarshaller.NewUnmarshaller(i, flowMetrics.platformDatas[i], cfg.DisableSecondWrite, libqueue.QueueReader(unmarshallQueues.FixedMultiQueue[i]), flowMetrics.dbwriters)
+		flowMetrics.unmarshallers[i] = unmarshaller.NewUnmarshaller(i, flowMetrics.platformDatas[i], cfg.DisableSecondWrite, libqueue.QueueReader(unmarshallQueues.FixedMultiQueue[i]), flowMetrics.dbwriter, exporters)
 	}
 
 	return &flowMetrics, nil
@@ -102,8 +98,6 @@ func (r *FlowMetrics) Close() error {
 	for i := 0; i < len(r.unmarshallers); i++ {
 		r.platformDatas[i].ClosePlatformInfoTable()
 	}
-	for i := 0; i < len(r.dbwriters); i++ {
-		r.dbwriters[i].Close()
-	}
+	r.dbwriter.Close()
 	return nil
 }
diff --git a/server/ingester/flow_metrics/unmarshaller/exporter.go b/server/ingester/flow_metrics/unmarshaller/exporter.go
new file mode 100644
index 000000000000..2b153e14d6cc
--- /dev/null
+++ b/server/ingester/flow_metrics/unmarshaller/exporter.go
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2024 Yunshan Networks
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package unmarshaller
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+	"unsafe"
+
+	exportercommon "github.com/deepflowio/deepflow/server/ingester/exporters/common"
+	"github.com/deepflowio/deepflow/server/ingester/exporters/config"
+	utag "github.com/deepflowio/deepflow/server/ingester/exporters/universal_tag"
+	"github.com/deepflowio/deepflow/server/libs/app"
+	flow_metrics "github.com/deepflowio/deepflow/server/libs/flow-metrics"
+	"github.com/deepflowio/deepflow/server/libs/utils"
+	"github.com/prometheus/prometheus/prompb"
+)
+
+type ExportDocumentFlow app.DocumentFlow
+type ExportDocumentApp app.DocumentApp
+type ExportDocumentUsage app.DocumentUsage
+
+func (e *ExportDocumentFlow) TimestampUs() int64 {
+	return int64(time.Duration(e.Timestamp) * time.Second / time.Microsecond)
+}
+
+func (e *ExportDocumentFlow) GetFieldValueByOffsetAndKind(offset uintptr, kind reflect.Kind, fieldName string) interface{} {
+	return utils.GetValueByOffsetAndKind(uintptr(unsafe.Pointer(e)), offset, kind, fieldName)
+}
+
+func (e *ExportDocumentFlow) Meter() flow_metrics.Meter {
+	// delegate to the underlying document type; calling e.Meter() here would recurse forever
+	return (*app.DocumentFlow)(e).Meter()
+}
+
+func (e *ExportDocumentFlow) EncodeTo(protocol config.ExportProtocol, utags *utag.UniversalTagsManager, cfg *config.ExporterCfg) (interface{}, error) {
+	return EncodeTo(e, protocol, utags, cfg)
+}
+
+func (e *ExportDocumentApp) GetFieldValueByOffsetAndKind(offset uintptr, kind reflect.Kind, fieldName string) interface{} {
+	return utils.GetValueByOffsetAndKind(uintptr(unsafe.Pointer(e)), offset, kind, fieldName)
+}
+
+func (e *ExportDocumentApp) Meter() flow_metrics.Meter {
+	// delegate to the underlying document type; calling e.Meter() here would recurse forever
+	return (*app.DocumentApp)(e).Meter()
+}
+
+func (e *ExportDocumentApp) EncodeTo(protocol config.ExportProtocol, utags *utag.UniversalTagsManager, cfg *config.ExporterCfg) (interface{}, error) {
+	return EncodeTo(e, protocol, utags, cfg)
+}
+
+func (e *ExportDocumentUsage) GetFieldValueByOffsetAndKind(offset uintptr, kind reflect.Kind, fieldName string) interface{} {
+	return utils.GetValueByOffsetAndKind(uintptr(unsafe.Pointer(e)), offset, kind, fieldName)
+}
+
+func (e *ExportDocumentUsage) Meter() flow_metrics.Meter {
+	// delegate to the underlying document type; calling e.Meter() here would recurse forever
+	return (*app.DocumentUsage)(e).Meter()
+}
+
+func (e *ExportDocumentUsage) EncodeTo(protocol config.ExportProtocol, utags *utag.UniversalTagsManager, cfg *config.ExporterCfg) (interface{}, error) {
+	return EncodeTo(e, protocol, utags, cfg)
+}
+
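+// EncodeTo dispatches a document to a protocol-specific encoder: Kafka gets a
+// JSON payload built from both endpoints' universal tags and custom K8s
+// labels; Prometheus gets a []prompb.TimeSeries via EncodeToPrometheus below.
+// Other protocols are rejected with an error.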
+func EncodeTo(e app.Document, protocol config.ExportProtocol, utags *utag.UniversalTagsManager, cfg *config.ExporterCfg) (interface{}, error) {
+	switch protocol {
+	case config.PROTOCOL_KAFKA:
+		tags0, tags1 := QueryUniversalTags0(e, utags), QueryUniversalTags1(e, utags)
+		k8sLabels0, k8sLabels1 := utags.QueryCustomK8sLabels(e.Tags().PodID), utags.QueryCustomK8sLabels(e.Tags().PodID1)
+		return exportercommon.EncodeToJson(e, int(e.DataSource()), cfg, tags0, tags1, k8sLabels0, k8sLabels1), nil
+	case config.PROTOCOL_PROMETHEUS:
+		return EncodeToPrometheus(e, utags, cfg)
+	default:
+		return nil, fmt.Errorf("doc does not support export to %s", protocol)
+	}
+}
+
+func QueryUniversalTags0(e app.Document, utags *utag.UniversalTagsManager) *utag.UniversalTags {
+	t := e.Tags()
+	return utags.QueryUniversalTags(
+		t.RegionID, t.AZID, t.HostID, t.PodNSID, t.PodClusterID, t.SubnetID, t.VTAPID,
+		uint8(t.L3DeviceType), t.AutoServiceType, t.AutoInstanceType,
+		t.L3DeviceID, t.AutoServiceID, t.AutoInstanceID, t.PodNodeID, t.PodGroupID, t.PodID, uint32(t.L3EpcID), t.GPID, t.ServiceID,
+		t.IsIPv4 == 1, t.IP, t.IP6,
+	)
+}
+
+func QueryUniversalTags1(e app.Document, utags *utag.UniversalTagsManager) *utag.UniversalTags {
+	t := e.Tags()
+	return utags.QueryUniversalTags(
+		t.RegionID1, t.AZID1, t.HostID1, t.PodNSID1, t.PodClusterID1, t.SubnetID1, t.VTAPID,
+		uint8(t.L3DeviceType1), t.AutoServiceType1, t.AutoInstanceType1,
+		t.L3DeviceID1, t.AutoServiceID1, t.AutoInstanceID1, t.PodNodeID1, t.PodGroupID1, t.PodID1, uint32(t.L3EpcID1), t.GPID1, t.ServiceID1,
+		t.IsIPv4 == 1, t.IP1, t.IP61,
+	)
+}
+
+func getPrometheusLabels(e app.Document, uTags0, uTags1 *utag.UniversalTags, cfg *config.ExporterCfg) []prompb.Label {
+	dataSourceId := config.DataSourceID(e.DataSource())
+	labels := make([]prompb.Label, 0, 16)
+	labels = append(labels, prompb.Label{
+		Name:  "datasource",
+		Value: dataSourceId.String(),
+	})
+	var name, valueStr string
+	for _, structTags := range cfg.ExportFieldStructTags[dataSourceId] {
+		if structTags.CategoryBit&config.TAG == 0 {
+			continue
+		}
+
+		value := e.GetFieldValueByOffsetAndKind(structTags.Offset, structTags.DataType, structTags.FieldName)
+		if utils.IsNil(value) {
+			log.Debug("is nil ", structTags.FieldName)
+			continue
+		}
+
+		if v, ok := value.(string); ok {
+			valueStr = v
+		} else {
+			valueStr = fmt.Sprintf("%v", value)
+		}
+
+		if structTags.ToStringFuncName != "" {
+			ret := structTags.ToStringFunc.Call([]reflect.Value{reflect.ValueOf(value)})
+			valueStr = ret[0].String()
+		} else if structTags.UniversalTagMapID > 0 && !cfg.UniversalTagNotConvertToString {
+			if strings.HasSuffix(structTags.Name, "_1") {
+				valueStr = uTags1.GetTagValue(structTags.UniversalTagMapID)
+			} else {
+				valueStr = uTags0.GetTagValue(structTags.UniversalTagMapID)
+			}
+		} else if structTags.EnumFile != "" && !cfg.EnumNotConvertToString {
+			valueStr = structTags.EnumStringMap[valueStr]
+		}
+
+		if cfg.TagOmitempty && valueStr == "" {
+			continue
+		}
+
+		if dataSourceId.IsMap() && structTags.MapName != "" {
+			name = structTags.MapName
+		} else {
+			name = structTags.Name
+		}
+
+		labels = append(labels, prompb.Label{
+			Name:  name,
+			Value: valueStr,
+		})
+	}
+	return labels
+}
+
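+// EncodeToPrometheus emits one prompb.TimeSeries per METRICS-category field:
+// each series copies the shared label set built by getPrometheusLabels and
+// appends a __name__ label with the field name; fields that do not convert
+// to float64 (or are zero while metrics-omitempty is set) are skipped.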
+func EncodeToPrometheus(e app.Document, utags *utag.UniversalTagsManager, cfg *config.ExporterCfg) (interface{}, error) {
+	dataSourceId := e.DataSource()
+	uTags0, uTags1 := QueryUniversalTags0(e, utags), QueryUniversalTags1(e, utags)
+	timeSeries := []prompb.TimeSeries{}
+
+	labels := getPrometheusLabels(e, uTags0, uTags1, cfg)
+	for _, structTags := range cfg.ExportFieldStructTags[dataSourceId] {
+		if structTags.CategoryBit&config.METRICS == 0 {
+			continue
+		}
+		value := e.GetFieldValueByOffsetAndKind(structTags.Offset, structTags.DataType, structTags.FieldName)
+		if utils.IsNil(value) {
+			log.Debugf("%s is nil", structTags.FieldName)
+			continue
+		}
+
+		valueFloat64, isFloat64 := utils.ConvertToFloat64(value)
+		if !isFloat64 {
+			continue
+		}
+
+		if cfg.MetricsOmitempty && valueFloat64 == 0 {
+			continue
+		}
+
+		ts := prompb.TimeSeries{}
+		ts.Labels = make([]prompb.Label, 0, len(labels)+1)
+		ts.Labels = append(ts.Labels, labels...)
+		ts.Labels = append(ts.Labels, prompb.Label{
+			Name:  "__name__",
+			Value: structTags.Name,
+		})
+		ts.Samples = make([]prompb.Sample, 1)
+		ts.Samples[0].Value = valueFloat64
+		ts.Samples[0].Timestamp = int64(e.Time()) * 1000 // convert to ms
+		timeSeries = append(timeSeries, ts)
+	}
+
+	return timeSeries, nil
+}
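A downstream Prometheus exporter would wrap the []prompb.TimeSeries returned above into a remote-write request, much as the PromWriter.sendRequest removed from dbwriter.go earlier in this diff did; a hedged sketch (endpoint and error handling are placeholders, using the same gogo/protobuf and snappy helpers):

	// Sketch only: series is the []prompb.TimeSeries from EncodeToPrometheus.
	wr := &prompb.WriteRequest{Timeseries: series}
	data, err := proto.Marshal(wr)
	if err != nil {
		return err
	}
	req, err := http.NewRequest("POST", endpoint, bytes.NewReader(snappy.Encode(nil, data)))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Encoding", "snappy")
	req.Header.Set("Content-Type", "application/x-protobuf")
	req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")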
diff --git a/server/ingester/flow_metrics/unmarshaller/handle_document.go b/server/ingester/flow_metrics/unmarshaller/handle_document.go
index 55fcee1d0e37..f9f8fd01799e 100644
--- a/server/ingester/flow_metrics/unmarshaller/handle_document.go
+++ b/server/ingester/flow_metrics/unmarshaller/handle_document.go
@@ -62,9 +62,9 @@ func getPlatformInfos(t *flow_metrics.Tag, platformData *grpc.PlatformInfoTable)
 		info = platformData.QueryMacInfo(t.MAC | uint64(t.L3EpcID)<<48)
 		if info == nil {
 			t.TagSource |= uint8(flow_metrics.EpcIP)
-			info = common.RegetInfoFromIP(t.IsIPv6 == 1, t.IP6, t.IP, t.L3EpcID, platformData)
+			info = common.RegetInfoFromIP(t.IsIPv4 == 0, t.IP6, t.IP, t.L3EpcID, platformData)
 		}
-	} else if t.IsIPv6 != 0 {
+	} else if t.IsIPv4 == 0 {
 		t.TagSource |= uint8(flow_metrics.EpcIP)
 		info = platformData.QueryIPV6Infos(t.L3EpcID, t.IP6)
 	} else {
@@ -95,9 +95,9 @@ func getPlatformInfos(t *flow_metrics.Tag, platformData *grpc.PlatformInfoTable)
 			info1 = platformData.QueryMacInfo(t.MAC1 | uint64(t.L3EpcID1)<<48)
 			if info1 == nil {
 				t.TagSource1 |= uint8(flow_metrics.EpcIP)
-				info1 = common.RegetInfoFromIP(t.IsIPv6 == 1, t.IP61, t.IP1, t.L3EpcID1, platformData)
+				info1 = common.RegetInfoFromIP(t.IsIPv4 == 0, t.IP61, t.IP1, t.L3EpcID1, platformData)
 			}
-		} else if t.IsIPv6 != 0 {
+		} else if t.IsIPv4 == 0 {
 			t.TagSource1 |= uint8(flow_metrics.EpcIP)
 			info1 = platformData.QueryIPV6Infos(t.L3EpcID1, t.IP61)
 		} else {
@@ -110,12 +110,12 @@ func getPlatformInfos(t *flow_metrics.Tag, platformData *grpc.PlatformInfoTable)
 	return info, info1
 }
 
-func DocumentExpand(doc *app.Document, platformData *grpc.PlatformInfoTable) error {
-	t := doc.Tagger.(*flow_metrics.Tag)
+func DocumentExpand(doc app.Document, platformData *grpc.PlatformInfoTable) error {
+	t := doc.Tags()
 	t.SetID("") // fields will be added to / removed from the Tag below; clear the ID to avoid stale values
 	// minute-granularity vtap_acl data does not need to be filled
-	if doc.Meter.ID() == flow_metrics.ACL_ID &&
+	if doc.Meter().ID() == flow_metrics.ACL_ID &&
 		t.DatabaseSuffixID() == 1 { // only the acl suffix
 		return nil
 	}
@@ -149,11 +149,11 @@ func DocumentExpand(doc *app.Document, platformData *grpc.PlatformInfoTable) err
 		t.PodID1 = info1.PodID
 		t.PodClusterID1 = uint16(info1.PodClusterID)
 		if common.IsPodServiceIP(t.L3DeviceType1, t.PodID1, t.PodNodeID1) {
-			t.ServiceID1 = platformData.QueryService(t.PodID1, t.PodNodeID1, uint32(t.PodClusterID1), t.PodGroupID1, t.L3EpcID1, t.IsIPv6 == 1, t.IP1, t.IP61, t.Protocol, t.ServerPort)
+			t.ServiceID1 = platformData.QueryService(t.PodID1, t.PodNodeID1, uint32(t.PodClusterID1), t.PodGroupID1, t.L3EpcID1, t.IsIPv4 == 0, t.IP1, t.IP61, t.Protocol, t.ServerPort)
 		}
 		if info == nil {
 			var ip0 net.IP
-			if t.IsIPv6 != 0 {
+			if t.IsIPv4 == 0 {
 				ip0 = t.IP6
 			} else {
 				ip0 = utils.IpFromUint32(t.IP)
@@ -192,15 +192,15 @@ func DocumentExpand(doc *app.Document, platformData *grpc.PlatformInfoTable) err
 	if common.IsPodServiceIP(t.L3DeviceType, t.PodID, t.PodNodeID) {
 		// for a single-side table (vtap_xxx_port), if ServerPort is valid it needs to match the serviceID
 		if t.ServerPort > 0 && t.Code&EdgeCode == 0 {
-			t.ServiceID = platformData.QueryService(t.PodID, t.PodNodeID, uint32(t.PodClusterID), t.PodGroupID, t.L3EpcID, t.IsIPv6 == 1, t.IP, t.IP6, t.Protocol, t.ServerPort)
+			t.ServiceID = platformData.QueryService(t.PodID, t.PodNodeID, uint32(t.PodClusterID), t.PodGroupID, t.L3EpcID, t.IsIPv4 == 0, t.IP, t.IP6, t.Protocol, t.ServerPort)
 			// for the 0-side of a double-side table (vtap_xxx_edge_port), or when ServerPort is invalid: if it is a pod service IP, the serviceID still needs to be matched
 		} else if common.IsPodServiceIP(t.L3DeviceType, t.PodID, 0) {
 			// on the 0-side, if it is just a pod node, there is no need to match the service
-			t.ServiceID = platformData.QueryService(t.PodID, t.PodNodeID, uint32(t.PodClusterID), t.PodGroupID, t.L3EpcID, t.IsIPv6 == 1, t.IP, t.IP6, t.Protocol, 0)
+			t.ServiceID = platformData.QueryService(t.PodID, t.PodNodeID, uint32(t.PodClusterID), t.PodGroupID, t.L3EpcID, t.IsIPv4 == 0, t.IP, t.IP6, t.Protocol, 0)
 		}
 	}
 	if info1 == nil && (t.Code&EdgeCode == EdgeCode) {
 		var ip1 net.IP
-		if t.IsIPv6 != 0 {
+		if t.IsIPv4 == 0 {
 			ip1 = t.IP61
 		} else {
 			ip1 = utils.IpFromUint32(t.IP1)
diff --git a/server/ingester/flow_metrics/unmarshaller/unmarshaller.go b/server/ingester/flow_metrics/unmarshaller/unmarshaller.go
index aff971eb397a..04da3d1c0a40 100644
--- a/server/ingester/flow_metrics/unmarshaller/unmarshaller.go
+++ b/server/ingester/flow_metrics/unmarshaller/unmarshaller.go
@@ -25,10 +25,12 @@ import (
 	logging "github.com/op/go-logging"
 
 	"github.com/deepflowio/deepflow/server/ingester/common"
+	"github.com/deepflowio/deepflow/server/ingester/exporters"
+	"github.com/deepflowio/deepflow/server/ingester/exporters/config"
 	"github.com/deepflowio/deepflow/server/ingester/flow_metrics/dbwriter"
 	"github.com/deepflowio/deepflow/server/libs/app"
 	"github.com/deepflowio/deepflow/server/libs/codec"
-	"github.com/deepflowio/deepflow/server/libs/flow-metrics"
+	flow_metrics "github.com/deepflowio/deepflow/server/libs/flow-metrics"
 	"github.com/deepflowio/deepflow/server/libs/flow-metrics/pb"
 	"github.com/deepflowio/deepflow/server/libs/grpc"
 	"github.com/deepflowio/deepflow/server/libs/queue"
@@ -47,6 +49,9 @@ const (
 	HASH_SEED = 17
 )
 
+var exportDataSources = []config.DataSourceID{config.NETWORK_1M, config.NETWORK_MAP_1M, config.NETWORK_1S, config.NETWORK_MAP_1S,
+	config.APPLICATION_1M, config.APPLICATION_MAP_1M, config.APPLICATION_1S, config.APPLICATION_MAP_1S}
+
 type QueueCache struct {
 	values []interface{}
 }
@@ -76,21 +81,23 @@ type Unmarshaller struct {
 	platformData       *grpc.PlatformInfoTable
 	disableSecondWrite bool
 	unmarshallQueue    queue.QueueReader
-	dbwriters          []dbwriter.DbWriter
+	dbwriter           dbwriter.DbWriter
 	queueBatchCache    QueueCache
 	counter            *Counter
 	tableCounter       [flow_metrics.METRICS_TABLE_ID_MAX + 1]int64
+	exporters          *exporters.Exporters
 	utils.Closable
 }
 
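+// NewUnmarshaller binds one unmarshall queue to the single ClickHouse writer
+// and the shared exporters; QueueProcess offers every decoded document to both.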
-func NewUnmarshaller(index int, platformData *grpc.PlatformInfoTable, disableSecondWrite bool, unmarshallQueue queue.QueueReader, dbwriters []dbwriter.DbWriter) *Unmarshaller {
+func NewUnmarshaller(index int, platformData *grpc.PlatformInfoTable, disableSecondWrite bool, unmarshallQueue queue.QueueReader, dbwriter dbwriter.DbWriter, exporters *exporters.Exporters) *Unmarshaller {
 	return &Unmarshaller{
 		index:              index,
 		platformData:       platformData,
 		disableSecondWrite: disableSecondWrite,
 		unmarshallQueue:    unmarshallQueue,
 		counter:            &Counter{MaxDelay: -3600, MinDelay: 3600},
-		dbwriters:          dbwriters,
+		dbwriter:           dbwriter,
+		exporters:          exporters,
 	}
 }
 
@@ -147,18 +154,12 @@ func (u *Unmarshaller) GetCounter() interface{} {
 	return counter
 }
 
-func (u *Unmarshaller) putStoreQueue(doc *app.Document) {
+func (u *Unmarshaller) putStoreQueue(doc app.Document) {
 	queueCache := &u.queueBatchCache
-	writersCount := len(u.dbwriters)
-	if writersCount-1 > 0 {
-		doc.AddReferenceCountN(int32(writersCount) - 1)
-	}
 	queueCache.values = append(queueCache.values, doc)
 
 	if len(queueCache.values) >= QUEUE_BATCH_SIZE {
-		for i := 0; i < writersCount; i++ {
-			u.dbwriters[i].Put(queueCache.values...)
-		}
+		u.dbwriter.Put(queueCache.values...)
 		queueCache.values = queueCache.values[:0]
 	}
 }
@@ -166,9 +167,7 @@ func (u *Unmarshaller) putStoreQueue(doc *app.Document) {
 func (u *Unmarshaller) flushStoreQueue() {
 	queueCache := &u.queueBatchCache
 	if len(queueCache.values) > 0 {
-		for i := 0; i < len(u.dbwriters); i++ {
-			u.dbwriters[i].Put(queueCache.values...)
-		}
+		u.dbwriter.Put(queueCache.values...)
 		queueCache.values = queueCache.values[:0]
 	}
 }
@@ -184,10 +183,10 @@ func DecodeForQueueMonitor(item interface{}) (interface{}, error) {
 	return ret, err
 }
 
-type BatchDocument []*app.Document
+type BatchDocument []app.Document
 
 func (bd BatchDocument) String() string {
-	docs := []*app.Document(bd)
+	docs := []app.Document(bd)
 	str := fmt.Sprintf("batch msg num=%d\n", len(docs))
 	for i, doc := range docs {
 		str += fmt.Sprintf("%d%s", i, doc.String())
@@ -202,7 +201,7 @@ func decodeForDebug(b []byte) (BatchDocument, error) {
 	decoder := &codec.SimpleDecoder{}
 	decoder.Init(b)
-	docs := make([]*app.Document, 0)
+	docs := make([]app.Document, 0)
 
 	for !decoder.IsEnd() {
 		doc, err := app.DecodeForQueueMonitor(decoder)
@@ -219,7 +218,7 @@ func (u *Unmarshaller) QueueProcess() {
 	rawDocs := make([]interface{}, GET_MAX_SIZE)
 	decoder := &codec.SimpleDecoder{}
 	pbDoc := pb.NewDocument()
-	for {
+	for !u.Closed() {
 		n := u.unmarshallQueue.Gets(rawDocs)
 		start := time.Now()
 		for i := 0; i < n; i++ {
@@ -235,19 +234,19 @@ func (u *Unmarshaller) QueueProcess() {
 					log.Warningf("Decode failed, bytes len=%d err=%s", len([]byte(bytes)), err)
 					break
 				}
-				u.isGoodDocument(int64(doc.Timestamp))
+				u.isGoodDocument(int64(doc.Time()))
 
 				// whether to write second-granularity data
 				if u.disableSecondWrite &&
-					doc.Flags&app.FLAG_PER_SECOND_METRICS != 0 {
-					app.ReleaseDocument(doc)
+					doc.Flag()&app.FLAG_PER_SECOND_METRICS != 0 {
+					doc.Release()
 					continue
 				}
 
 				if err := DocumentExpand(doc, u.platformData); err != nil {
 					log.Debug(err)
 					u.counter.DropDocCount++
-					app.ReleaseDocument(doc)
+					doc.Release()
 					continue
 				}
 
@@ -255,17 +254,18 @@ func (u *Unmarshaller) QueueProcess() {
 				if err != nil {
 					log.Debug(err)
 					u.counter.DropDocCount++
-					app.ReleaseDocument(doc)
+					doc.Release()
 					continue
 				}
 				u.tableCounter[tableID]++
 
+				u.export(doc)
 				u.putStoreQueue(doc)
 			}
 			receiver.ReleaseRecvBuffer(recvBytes)
-
 		} else if value == nil { // flush ticker
 			u.flushStoreQueue()
+			u.export(nil)
 		} else {
 			log.Warning("get unmarshall queue data type wrong")
 		}
@@ -273,3 +273,24 @@
 		u.counter.TotalTime += int64(time.Since(start))
 	}
 }
+
+func (u *Unmarshaller) export(doc app.Document) {
+	if u.exporters == nil {
+		return
+	}
+	if doc == nil {
+		// flush data
+		for _, v := range exportDataSources {
+			u.exporters.Put(uint32(v), u.index, nil)
+		}
+		return
+	}
+
+	switch v := doc.(type) {
+	case *app.DocumentFlow:
+		u.exporters.Put(v.DataSource(), u.index, (*ExportDocumentFlow)(v))
+	case *app.DocumentApp:
+		u.exporters.Put(v.DataSource(), u.index, (*ExportDocumentApp)(v))
+	case *app.DocumentUsage:
+		u.exporters.Put(v.DataSource(), u.index, (*ExportDocumentUsage)(v))
+	}
+}
diff --git a/server/ingester/ingester/ingester.go b/server/ingester/ingester/ingester.go
index 31fdcd4b53ff..a808b815ecaf 100644
--- a/server/ingester/ingester/ingester.go
+++ b/server/ingester/ingester/ingester.go
@@ -26,6 +26,7 @@ import (
 
 	"github.com/deepflowio/deepflow/server/ingester/ckmonitor"
 	"github.com/deepflowio/deepflow/server/ingester/datasource"
+	"github.com/deepflowio/deepflow/server/ingester/exporters"
 	"github.com/deepflowio/deepflow/server/libs/grpc"
 	"github.com/deepflowio/deepflow/server/libs/logger"
 	"github.com/deepflowio/deepflow/server/libs/pool"
@@ -43,6 +44,7 @@ import (
 	"github.com/deepflowio/deepflow/server/ingester/droplet/droplet"
 	eventcfg "github.com/deepflowio/deepflow/server/ingester/event/config"
 	"github.com/deepflowio/deepflow/server/ingester/event/event"
+	exporterscfg "github.com/deepflowio/deepflow/server/ingester/exporters/config"
 	extmetricscfg "github.com/deepflowio/deepflow/server/ingester/ext_metrics/config"
 	"github.com/deepflowio/deepflow/server/ingester/ext_metrics/ext_metrics"
 	flowlogcfg "github.com/deepflowio/deepflow/server/ingester/flow_log/config"
@@ -128,6 +130,10 @@ func Start(configPath string, shared *servercommon.ControllerIngesterShared) []i
 	bytes, _ = yaml.Marshal(prometheusConfig)
 	log.Infof("prometheus config:\n%s", string(bytes))
 
+	exportersConfig := exporterscfg.Load(cfg, configPath)
+	bytes, _ = yaml.Marshal(exportersConfig)
+	log.Infof("exporters config:\n%s", string(bytes))
+
 	var issu *ckissu.Issu
 	if !cfg.StorageDisabled {
 		var err error
@@ -160,8 +166,12 @@ func Start(configPath string, shared *servercommon.ControllerIngesterShared) []i
 		cfg.NodeIP,
 		receiver)
 
+	exporters := exporters.NewExporters(exportersConfig)
+	exporters.Start()
+	closers = append(closers, exporters)
+
 	// write flow log data
-	flowLog, err := flowlog.NewFlowLog(flowLogConfig, receiver, platformDataManager)
+	flowLog, err := flowlog.NewFlowLog(flowLogConfig, receiver, platformDataManager, exporters)
 	checkError(err)
 	flowLog.Start()
 	closers = append(closers, flowLog)
@@ -174,13 +184,13 @@ func Start(configPath string, shared *servercommon.ControllerIngesterShared) []i
 	closers = append(closers, extMetrics)
 
 	// write flow metrics data
-	flowMetrics, err := flowmetrics.NewFlowMetrics(flowMetricsConfig, receiver, platformDataManager)
+	flowMetrics, err := flowmetrics.NewFlowMetrics(flowMetricsConfig, receiver, platformDataManager, exporters)
 	checkError(err)
 	flowMetrics.Start()
 	closers = append(closers, flowMetrics)
 
 	// write event data
-	event, err := event.NewEvent(eventConfig, shared.ResourceEventQueue, receiver, platformDataManager)
+	event, err := event.NewEvent(eventConfig, shared.ResourceEventQueue, receiver, platformDataManager, exporters)
 	checkError(err)
 	event.Start()
 	closers = append(closers, event)
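The pipelines wired above all share one fan-out contract; a rough sketch of what they rely on (names taken from this diff, queue internals are assumptions):

	// Sketch only: each pipeline hands encodable items to the shared Exporters,
	// keyed by data-source ID and the calling decoder's index; a nil item acts
	// as a flush tick (see Unmarshaller.export above).
	exporters.Put(uint32(config.APPLICATION_MAP_1S), decoderIndex, item)
	exporters.Put(uint32(config.APPLICATION_MAP_1S), decoderIndex, nil) // flush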
diff --git a/server/ingester/ingesterctl/cmd/cmd.go b/server/ingester/ingesterctl/cmd/cmd.go
index 7b234b33ce98..7f6ffc780f9b 100644
--- a/server/ingester/ingesterctl/cmd/cmd.go
+++ b/server/ingester/ingesterctl/cmd/cmd.go
@@ -59,9 +59,9 @@ func RegisterIngesterCommand(root *cobra.Command) {
 		Use:   "prometheus",
 		Short: "Prometheus label debug commands",
 	}
-	otlpCmd := &cobra.Command{
-		Use:   "otlp",
-		Short: "otlp exporter debug commands",
+	exportersCmd := &cobra.Command{
+		Use:   "exporters",
+		Short: "exporters debug commands",
 	}
 	profileCmd := &cobra.Command{
 		Use:   "profile",
@@ -69,7 +69,7 @@ func RegisterIngesterCommand(root *cobra.Command) {
 	}
 
 	root.AddCommand(ingesterCmd)
-	ingesterCmd.AddCommand(dropletCmd, flowMetricsCmd, flowLogCmd, prometheusCmd, otlpCmd, profileCmd)
+	ingesterCmd.AddCommand(dropletCmd, flowMetricsCmd, flowLogCmd, prometheusCmd, exportersCmd, profileCmd)
 	ingesterCmd.AddCommand(profiler.RegisterProfilerCommand())
 	ingesterCmd.AddCommand(debug.RegisterLogLevelCommand())
 	ingesterCmd.AddCommand(RegisterTimeConvertCommand())
@@ -103,8 +103,10 @@ func RegisterIngesterCommand(root *cobra.Command) {
 			"2-decode-to-slow-decode-prometheus",
 		}))
 
-	otlpCmd.AddCommand(debug.ClientRegisterSimple(ingesterctl.CMD_OTLP_EXPORTER, debug.CmdHelper{"stats", "show otlp exporter stats"}, nil))
-	otlpCmd.AddCommand(debug.ClientRegisterSimple(ingesterctl.CMD_EXPORTER_PLATFORMDATA, debug.CmdHelper{"platformData", "show otlp platformData"}, nil))
+	exportersCmd.AddCommand(debug.ClientRegisterSimple(ingesterctl.CMD_OTLP_EXPORTER, debug.CmdHelper{"otlp", "show otlp exporter stats"}, nil))
+	exportersCmd.AddCommand(debug.ClientRegisterSimple(ingesterctl.CMD_EXPORTER_PLATFORMDATA, debug.CmdHelper{"platformData", "show otlp platformData"}, nil))
+	exportersCmd.AddCommand(debug.ClientRegisterSimple(ingesterctl.CMD_KAFKA_EXPORTER, debug.CmdHelper{Cmd: "kafka", Helper: "show kafka exporter stats"}, nil))
+	exportersCmd.AddCommand(debug.ClientRegisterSimple(ingesterctl.CMD_PROMETHEUS_EXPORTER, debug.CmdHelper{Cmd: "prometheus", Helper: "show prometheus exporter stats"}, nil))
 
 	profileCmd.AddCommand(debug.ClientRegisterSimple(ingesterctl.CMD_PLATFORMDATA_PROFILE, debug.CmdHelper{"platformData [filter]", "show profile platform data statistics"}, nil))
 
diff --git a/server/ingester/ingesterctl/const.go b/server/ingester/ingesterctl/const.go
index 86d8ca102676..fd90552f7ac1 100644
--- a/server/ingester/ingesterctl/const.go
+++ b/server/ingester/ingesterctl/const.go
@@ -52,6 +52,8 @@ const (
 	CMD_OTLP_EXPORTER
 	CMD_EXPORTER_PLATFORMDATA
 	CMD_PLATFORMDATA_PROFILE
+	CMD_KAFKA_EXPORTER
+	CMD_PROMETHEUS_EXPORTER
 )
 
 const (
diff --git a/server/libs/app/codec.go b/server/libs/app/codec.go
index 1d852449a8d5..64e0e2bc0777 100644
--- a/server/libs/app/codec.go
+++ b/server/libs/app/codec.go
@@ -21,11 +21,11 @@ import (
 	"fmt"
 
 	"github.com/deepflowio/deepflow/server/libs/codec"
-	"github.com/deepflowio/deepflow/server/libs/flow-metrics"
+	flow_metrics "github.com/deepflowio/deepflow/server/libs/flow-metrics"
 	"github.com/deepflowio/deepflow/server/libs/flow-metrics/pb"
 )
 
-func DecodePB(decoder *codec.SimpleDecoder, pbDoc *pb.Document) (*Document, error) {
+func DecodePB(decoder *codec.SimpleDecoder, pbDoc *pb.Document) (Document, error) {
 	if decoder == nil {
 		return nil, errors.New("No input decoder")
 	}
@@ -35,69 +35,67 @@ func DecodePB(decoder *codec.SimpleDecoder, pbDoc *pb.Document) (*Document, erro
 		return nil, fmt.Errorf("Decode failed: %s", err)
 	}
 
-	doc := AcquireDocument()
-	doc.Timestamp = pbDoc.Timestamp
-
-	tag := flow_metrics.AcquireTag()
-	tag.Field = flow_metrics.AcquireField()
-	tag.ReadFromPB(pbDoc.Tag)
-	doc.Tagger = tag
-
 	meterID := uint8(pbDoc.Meter.MeterId)
 	switch meterID {
 	case flow_metrics.FLOW_ID:
-		flowMeter := flow_metrics.AcquireFlowMeter()
-		flowMeter.ReadFromPB(pbDoc.Meter.Flow)
-		doc.Meter = flowMeter
+		doc := AcquireDocumentFlow()
+		doc.Timestamp = pbDoc.Timestamp
+		doc.Flags = DocumentFlag(pbDoc.Flags)
+		doc.Tag.ReadFromPB(pbDoc.Tag)
+		doc.FlowMeter.ReadFromPB(pbDoc.Meter.Flow)
+		return doc, nil
 	case flow_metrics.ACL_ID:
-		usageMeter := flow_metrics.AcquireUsageMeter()
-		usageMeter.ReadFromPB(pbDoc.Meter.Usage)
-		doc.Meter = usageMeter
+		doc := AcquireDocumentUsage()
+		doc.Timestamp = pbDoc.Timestamp
+		doc.Flags = DocumentFlag(pbDoc.Flags)
+		doc.Tag.ReadFromPB(pbDoc.Tag)
+		doc.UsageMeter.ReadFromPB(pbDoc.Meter.Usage)
+		return doc, nil
 	case flow_metrics.APP_ID:
-		appMeter := flow_metrics.AcquireAppMeter()
-		appMeter.ReadFromPB(pbDoc.Meter.App)
-		doc.Meter = appMeter
+		doc := AcquireDocumentApp()
+		doc.Timestamp = pbDoc.Timestamp
+		doc.Flags = DocumentFlag(pbDoc.Flags)
+		doc.Tag.ReadFromPB(pbDoc.Tag)
+		doc.AppMeter.ReadFromPB(pbDoc.Meter.App)
+		return doc, nil
 	default:
 		return nil, fmt.Errorf("Unknown meter ID %d", meterID)
 	}
-
-	doc.Flags = DocumentFlag(pbDoc.Flags)
-	return doc, nil
 }
 
 // used by the queue monitor when printing
-func DecodeForQueueMonitor(decoder *codec.SimpleDecoder) (*Document, error) {
+func DecodeForQueueMonitor(decoder *codec.SimpleDecoder) (Document, error) {
 	pbDoc := &pb.Document{}
 	decoder.ReadPB(pbDoc)
 	if decoder.Failed() {
 		return nil, errors.New("Decode failed")
 	}
 
-	doc := &Document{}
-	doc.Timestamp = pbDoc.Timestamp
-
-	tag := &flow_metrics.Tag{}
-	tag.Field = &flow_metrics.Field{}
-	tag.ReadFromPB(pbDoc.Tag)
-	doc.Tagger = tag
-
 	meterID := uint8(pbDoc.Meter.MeterId)
 	switch meterID {
 	case flow_metrics.FLOW_ID:
-		flowMeter := flow_metrics.AcquireFlowMeter()
-		flowMeter.ReadFromPB(pbDoc.Meter.Flow)
-		doc.Meter = flowMeter
+		doc := &DocumentFlow{}
+		doc.Timestamp = pbDoc.Timestamp
+		doc.Flags = DocumentFlag(pbDoc.Flags)
+		doc.Tag.ReadFromPB(pbDoc.Tag)
+		doc.FlowMeter.ReadFromPB(pbDoc.Meter.Flow)
+		return doc, nil
 	case flow_metrics.ACL_ID:
-		usageMeter := flow_metrics.AcquireUsageMeter()
-		usageMeter.ReadFromPB(pbDoc.Meter.Usage)
-		doc.Meter = usageMeter
+		doc := &DocumentUsage{}
+		doc.Timestamp = pbDoc.Timestamp
+		doc.Flags = DocumentFlag(pbDoc.Flags)
+		doc.Tag.ReadFromPB(pbDoc.Tag)
+		doc.UsageMeter.ReadFromPB(pbDoc.Meter.Usage)
+		return doc, nil
 	case flow_metrics.APP_ID:
-		appMeter := flow_metrics.AcquireAppMeter()
-		appMeter.ReadFromPB(pbDoc.Meter.App)
-		doc.Meter = appMeter
+		doc := &DocumentApp{}
+		doc.Timestamp = pbDoc.Timestamp
+		doc.Flags = DocumentFlag(pbDoc.Flags)
+		doc.Tag.ReadFromPB(pbDoc.Tag)
+		doc.AppMeter.ReadFromPB(pbDoc.Meter.App)
+		return doc, nil
+	default:
+		return nil, fmt.Errorf("Unknown meter ID %d", meterID)
 	}
-
-	doc.Flags = DocumentFlag(pbDoc.Flags)
-	return doc, nil
 }
diff --git a/server/libs/app/document.go b/server/libs/app/document.go
index ae49d607ff46..7d0d3743702a 100644
--- a/server/libs/app/document.go
+++ b/server/libs/app/document.go
@@ -17,14 +17,15 @@
 package app
 
 import (
-	"errors"
 	"fmt"
+	"reflect"
+	"time"
+	"unsafe"
 
 	"github.com/deepflowio/deepflow/server/libs/ckdb"
-	"github.com/deepflowio/deepflow/server/libs/codec"
 	flow_metrics "github.com/deepflowio/deepflow/server/libs/flow-metrics"
-	"github.com/deepflowio/deepflow/server/libs/flow-metrics/pb"
 	"github.com/deepflowio/deepflow/server/libs/pool"
+	"github.com/deepflowio/deepflow/server/libs/utils"
 )
 
 const (
@@ -34,141 +35,201 @@
 type DocumentFlag uint32
 
-type Document struct {
+const (
+	FLAG_PER_SECOND_METRICS DocumentFlag = 1 << iota
+)
+
Document interface { + Tags() *flow_metrics.Tag + Time() uint32 + Flag() DocumentFlag + Meter() flow_metrics.Meter + Release() + WriteBlock(block *ckdb.Block) + OrgID() uint16 + TableID() (uint8, error) + String() string + AddReferenceCount() + AddReferenceCountN(n int32) + DataSource() uint32 + GetFieldValueByOffsetAndKind(offset uintptr, kind reflect.Kind, fieldName string) interface{} + TimestampUs() int64 +} + +type DocumentBase struct { pool.ReferenceCount + Timestamp uint32 `json:"time" category:"tag" sub:"flow_info"` + Flags DocumentFlag + flow_metrics.Tag +} - Timestamp uint32 - flow_metrics.Tagger - flow_metrics.Meter - Flags DocumentFlag +func (b *DocumentBase) Tags() *flow_metrics.Tag { + return &b.Tag } -const ( - FLAG_PER_SECOND_METRICS DocumentFlag = 1 << iota -) +func (b *DocumentBase) Time() uint32 { + return b.Timestamp +} -func (d Document) String() string { - return fmt.Sprintf("\n{\n\ttimestamp: %d\tFlags: b%b\n\ttag: %s\n\tmeter: %#v\n}\n", - d.Timestamp, d.Flags, d.Tagger, d.Meter) +func (b *DocumentBase) Flag() DocumentFlag { + return b.Flags } -var poolDocument = pool.NewLockFreePool(func() interface{} { - return &Document{} +func (b *DocumentBase) TableID() (uint8, error) { + return b.Tag.TableID((b.Flags & FLAG_PER_SECOND_METRICS) == 1) +} + +func (b *DocumentBase) OrgID() uint16 { + return b.Tag.OrgId +} + +func (b *DocumentBase) DataSource() uint32 { + dataSourceId, _ := b.TableID() + return uint32(dataSourceId) +} + +func (e *DocumentBase) TimestampUs() int64 { + return int64(time.Duration(e.Timestamp) * time.Second / time.Microsecond) +} + +type DocumentFlow struct { + DocumentBase + flow_metrics.FlowMeter +} + +type DocumentApp struct { + DocumentBase + flow_metrics.AppMeter +} + +type DocumentUsage struct { + DocumentBase + flow_metrics.UsageMeter +} + +func (d *DocumentFlow) String() string { + return fmt.Sprintf("\n{\n\ttimestamp: %d\tFlags: b%b\n\ttag: %+v\n\tmeter: %#v\n}\n", + d.Timestamp, d.Flags, d.Tag, d.FlowMeter) +} + +var poolDocumentFlow = pool.NewLockFreePool(func() interface{} { + return &DocumentFlow{} }) -func AcquireDocument() *Document { - d := poolDocument.Get().(*Document) +func AcquireDocumentFlow() *DocumentFlow { + d := poolDocumentFlow.Get().(*DocumentFlow) d.ReferenceCount.Reset() return d } -func ReleaseDocument(doc *Document) { +func ReleaseDocumentFlow(doc *DocumentFlow) { if doc == nil || doc.SubReferenceCount() { return } - if doc.Tagger != nil { - doc.Tagger.Release() - } - if doc.Meter != nil { - doc.Meter.Release() - } - *doc = Document{} - poolDocument.Put(doc) + *doc = DocumentFlow{} + poolDocumentFlow.Put(doc) } -func CloneDocument(doc *Document) *Document { - newDoc := AcquireDocument() - newDoc.Timestamp = doc.Timestamp - newDoc.Tagger = doc.Tagger.Clone() - newDoc.Meter = doc.Meter.Clone() - newDoc.Flags = doc.Flags - return newDoc +func (d *DocumentFlow) Release() { + ReleaseDocumentFlow(d) } -func PseudoCloneDocument(doc *Document) { - doc.AddReferenceCount() +func (d *DocumentFlow) WriteBlock(block *ckdb.Block) { + d.Tag.WriteBlock(block, d.Timestamp) + d.FlowMeter.WriteBlock(block) } -func (d *Document) Release() { - ReleaseDocument(d) +func (d *DocumentFlow) GetFieldValueByOffsetAndKind(offset uintptr, kind reflect.Kind, fieldName string) interface{} { + return utils.GetValueByOffsetAndKind(uintptr(unsafe.Pointer(d)), offset, kind, fieldName) } -func (d *Document) EncodePB(encoder *codec.SimpleEncoder, i interface{}) error { - p, ok := i.(*pb.Document) - if !ok { - return fmt.Errorf("invalid interface type, should 
be *pb.Document") - } - if p.Meter == nil { - p.Meter = &pb.Meter{} - } - flow := p.Meter.Flow - app := p.Meter.App - usage := p.Meter.Usage +func (d *DocumentFlow) GetStringValue(offset uintptr, kind reflect.Kind) string { + return "" +} - if err := d.WriteToPB(p); err != nil { - return err - } - encoder.WritePB(p) - if p.Meter.Flow == nil { - p.Meter.Flow = flow - } - if p.Meter.App == nil { - p.Meter.App = app - } - if p.Meter.Usage == nil { - p.Meter.Usage = usage - } - return nil +func (d *DocumentFlow) Meter() flow_metrics.Meter { + return &d.FlowMeter } -func (d *Document) WriteToPB(p *pb.Document) error { - p.Timestamp = d.Timestamp - if p.Tag == nil { - p.Tag = &pb.MiniTag{} - } - d.Tagger.(*flow_metrics.MiniTag).WriteToPB(p.Tag) - if p.Meter == nil { - p.Meter = &pb.Meter{} +func (d *DocumentApp) String() string { + return fmt.Sprintf("\n{\n\ttimestamp: %d\tFlags: b%b\n\ttag: %+v\n\tmeter: %#v\n}\n", + d.Timestamp, d.Flags, d.Tag, d.AppMeter) +} + +var poolDocumentApp = pool.NewLockFreePool(func() interface{} { + return &DocumentApp{} +}) + +func AcquireDocumentApp() *DocumentApp { + d := poolDocumentApp.Get().(*DocumentApp) + d.ReferenceCount.Reset() + return d +} + +func ReleaseDocumentApp(doc *DocumentApp) { + if doc == nil || doc.SubReferenceCount() { + return } - p.Meter.MeterId = uint32(d.Meter.ID()) - switch d.Meter.ID() { - case flow_metrics.FLOW_ID: - if p.Meter.Flow == nil { - p.Meter.Flow = &pb.FlowMeter{} - } - d.Meter.(*flow_metrics.FlowMeter).WriteToPB(p.Meter.Flow) - p.Meter.Usage, p.Meter.App = nil, nil - case flow_metrics.ACL_ID: - if p.Meter.Usage == nil { - p.Meter.Usage = &pb.UsageMeter{} - } - d.Meter.(*flow_metrics.UsageMeter).WriteToPB(p.Meter.Usage) - p.Meter.Flow, p.Meter.App = nil, nil - case flow_metrics.APP_ID: - if p.Meter.App == nil { - p.Meter.App = &pb.AppMeter{} - } - d.Meter.(*flow_metrics.AppMeter).WriteToPB(p.Meter.App) - p.Meter.Usage, p.Meter.Flow = nil, nil - default: - return errors.New(fmt.Sprintf("unknown meter id %d", d.Meter.ID())) + + *doc = DocumentApp{} + poolDocumentApp.Put(doc) +} + +func (d *DocumentApp) Release() { + ReleaseDocumentApp(d) +} + +func (d *DocumentApp) WriteBlock(block *ckdb.Block) { + d.Tag.WriteBlock(block, d.Timestamp) + d.AppMeter.WriteBlock(block) +} + +func (d *DocumentApp) Meter() flow_metrics.Meter { + return &d.AppMeter +} + +func (d *DocumentApp) GetFieldValueByOffsetAndKind(offset uintptr, kind reflect.Kind, fieldName string) interface{} { + return utils.GetValueByOffsetAndKind(uintptr(unsafe.Pointer(d)), offset, kind, fieldName) +} + +func (d *DocumentUsage) String() string { + return fmt.Sprintf("\n{\n\ttimestamp: %d\tFlags: b%b\n\ttag: %+v\n\tmeter: %#v\n}\n", + d.Timestamp, d.Flags, d.Tag, d.UsageMeter) +} + +var poolDocumentUsage = pool.NewLockFreePool(func() interface{} { + return &DocumentUsage{} +}) + +func AcquireDocumentUsage() *DocumentUsage { + d := poolDocumentUsage.Get().(*DocumentUsage) + d.ReferenceCount.Reset() + return d +} + +func ReleaseDocumentUsage(doc *DocumentUsage) { + if doc == nil || doc.SubReferenceCount() { + return } - p.Flags = uint32(d.Flags) - return nil + *doc = DocumentUsage{} + poolDocumentUsage.Put(doc) +} + +func (d *DocumentUsage) Release() { + ReleaseDocumentUsage(d) } -func (d *Document) WriteBlock(block *ckdb.Block) { - d.Tagger.(*flow_metrics.Tag).WriteBlock(block, d.Timestamp) - d.Meter.WriteBlock(block) +func (d *DocumentUsage) WriteBlock(block *ckdb.Block) { + d.Tag.WriteBlock(block, d.Timestamp) + d.UsageMeter.WriteBlock(block) } -func (d *Document) OrgID() 
uint16 { - return d.Tagger.(*flow_metrics.Tag).OrgId +func (d *DocumentUsage) Meter() flow_metrics.Meter { + return &d.UsageMeter } -func (d *Document) TableID() (uint8, error) { - tag, _ := d.Tagger.(*flow_metrics.Tag) - return tag.TableID((d.Flags & FLAG_PER_SECOND_METRICS) == 1) +func (d *DocumentUsage) GetFieldValueByOffsetAndKind(offset uintptr, kind reflect.Kind, fieldName string) interface{} { + return utils.GetValueByOffsetAndKind(uintptr(unsafe.Pointer(d)), offset, kind, fieldName) } diff --git a/server/libs/datatype/base_type.go b/server/libs/datatype/base_type.go index c5c540a028b5..e0fef07b9058 100644 --- a/server/libs/datatype/base_type.go +++ b/server/libs/datatype/base_type.go @@ -107,3 +107,14 @@ func (m *MACAddr) String() string { func (m *MACAddr) Int() uint64 { return m.addrInt } + +type IP4 uint32 + +func (ip4 IP4) String() string { + ip := make(net.IP, 4) + ip[0] = byte(ip4 >> 24) + ip[1] = byte(ip4 >> 16) + ip[2] = byte(ip4 >> 8) + ip[3] = byte(ip4) + return ip.String() +} diff --git a/server/libs/flow-metrics/app_meter.go b/server/libs/flow-metrics/app_meter.go index 65271ab9445a..98213588fa5c 100644 --- a/server/libs/flow-metrics/app_meter.go +++ b/server/libs/flow-metrics/app_meter.go @@ -135,9 +135,9 @@ func (m *AppMeter) WriteBlock(block *ckdb.Block) { } type AppTraffic struct { - Request uint32 `db:"request"` - Response uint32 `db:"response"` - DirectionScore uint8 `db:"direction_score"` + Request uint32 `json:"request" category:"metrics" sub:"throughput"` + Response uint32 `json:"response" category:"metrics" sub:"throughput"` + DirectionScore uint8 `json:"direction_score" category:"metrics" sub:"throughput"` } func (_ *AppTraffic) Reverse() { @@ -195,9 +195,9 @@ func (t *AppTraffic) WriteBlock(block *ckdb.Block) { } type AppLatency struct { - RRTMax uint32 `db:"rrt_max"` // us - RRTSum uint64 `db:"rrt_sum"` // us - RRTCount uint32 `db:"rrt_count"` + RRTMax uint32 `json:"rrt_max" category:"metrics" sub:"delay"` // us + RRTSum uint64 `json:"rrt_sum" category:"metrics" sub:"delay"` // us + RRTCount uint32 `json:"rrt_count" category:"metrics" sub:"delay"` } func (_ *AppLatency) Reverse() { @@ -255,9 +255,9 @@ func (l *AppLatency) WriteBlock(block *ckdb.Block) { } type AppAnomaly struct { - ClientError uint32 `db:"client_error"` - ServerError uint32 `db:"server_error"` - Timeout uint32 `db:"timeout"` + ClientError uint32 `json:"client_error" category:"metrics" sub:"error"` + ServerError uint32 `json:"server_error" category:"metrics" sub:"error"` + Timeout uint32 `json:"timeout" category:"metrics" sub:"error"` } func (_ *AppAnomaly) Reverse() { diff --git a/server/libs/flow-metrics/basic_meter.go b/server/libs/flow-metrics/basic_meter.go index 422b06f3217e..57c227081e52 100644 --- a/server/libs/flow-metrics/basic_meter.go +++ b/server/libs/flow-metrics/basic_meter.go @@ -24,23 +24,23 @@ import ( ) type Traffic struct { - PacketTx uint64 `db:"packet_tx"` - PacketRx uint64 `db:"packet_rx"` - ByteTx uint64 `db:"byte_tx"` - ByteRx uint64 `db:"byte_rx"` - L3ByteTx uint64 `db:"l3_byte_tx"` - L3ByteRx uint64 `db:"l3_byte_rx"` - L4ByteTx uint64 `db:"l4_byte_tx"` - L4ByteRx uint64 `db:"l4_byte_rx"` - NewFlow uint64 `db:"new_flow"` - ClosedFlow uint64 `db:"closed_flow"` + PacketTx uint64 `json:"packet_tx" category:"metrics" sub:"l3_throughput"` + PacketRx uint64 `json:"packet_rx" category:"metrics" sub:"l3_throughput"` + ByteTx uint64 `json:"byte_tx" category:"metrics" sub:"l3_throughput"` + ByteRx uint64 `json:"byte_rx" category:"metrics" sub:"l3_throughput"` + L3ByteTx 
uint64 `json:"l3_byte_tx" category:"metrics" sub:"l3_throughput"` + L3ByteRx uint64 `json:"l3_byte_rx" category:"metrics" sub:"l3_throughput"` + L4ByteTx uint64 `json:"l4_byte_tx" category:"metrics" sub:"l4_throughput"` + L4ByteRx uint64 `json:"l4_byte_rx" category:"metrics" sub:"l4_throughput"` + NewFlow uint64 `json:"new_flow" category:"metrics" sub:"l4_throughput"` + ClosedFlow uint64 `json:"closed_flow" category:"metrics" sub:"l4_throughput"` - L7Request uint32 `db:"l7_request"` - L7Response uint32 `db:"l7_response"` - SynCount uint32 `db:"syn_count"` - SynackCount uint32 `db:"synack_count"` + L7Request uint32 `json:"l7_request" category:"metrics" sub:"application"` + L7Response uint32 `json:"l7_response" category:"metrics" sub:"application"` + SynCount uint32 `json:"syn_count" category:"metrics" sub:"l4_throughput"` + SynackCount uint32 `json:"synack_count" category:"metrics" sub:"l4_throughput"` - DirectionScore uint8 `db:"direction_score"` + DirectionScore uint8 `json:"direction_score" category:"metrics" sub:"l4_throughput"` } func (t *Traffic) Reverse() { @@ -221,29 +221,29 @@ func (t *Traffic) WriteBlock(block *ckdb.Block) { } type Latency struct { - RTTMax uint32 `db:"rtt_max"` // us. Trident guarantees the max latency never exceeds 3600s, so it fits in a u32 - RTTClientMax uint32 `db:"rtt_client_max"` // us - RTTServerMax uint32 `db:"rtt_server_max"` // us - SRTMax uint32 `db:"srt_max"` // us - ARTMax uint32 `db:"art_max"` // us - RRTMax uint32 `db:"rrt_max"` // us - CITMax uint32 `db:"cit_max"` // client idle time max - - RTTSum uint64 `db:"rtt_sum"` // us - RTTClientSum uint64 `db:"rtt_client_sum"` // us - RTTServerSum uint64 `db:"rtt_server_sum"` // us - SRTSum uint64 `db:"srt_sum"` // us - ARTSum uint64 `db:"art_sum"` // us - RRTSum uint64 `db:"rrt_sum"` // us - CITSum uint64 `db:"cit_sum"` - - RTTCount uint32 `db:"rtt_count"` - RTTClientCount uint32 `db:"rtt_client_count"` - RTTServerCount uint32 `db:"rtt_server_count"` - SRTCount uint32 `db:"srt_count"` - ARTCount uint32 `db:"art_count"` - RRTCount uint32 `db:"rrt_count"` - CITCount uint32 `db:"cit_count"` + RTTMax uint32 `json:"rtt_max" category:"metrics" sub:"delay"` // us. Trident guarantees the max latency never exceeds 3600s, so it fits in a u32 + RTTClientMax uint32 `json:"rtt_client_max" category:"metrics" sub:"delay"` // us + RTTServerMax uint32 `json:"rtt_server_max" category:"metrics" sub:"delay"` // us + SRTMax uint32 `json:"srt_max" category:"metrics" sub:"delay"` // us + ARTMax uint32 `json:"art_max" category:"metrics" sub:"delay"` // us + RRTMax uint32 `json:"rrt_max" category:"metrics" sub:"delay"` // us + CITMax uint32 `json:"cit_max" category:"metrics" sub:"delay"` // client idle time max + + RTTSum uint64 `json:"rtt_sum" category:"metrics" sub:"delay"` // us + RTTClientSum uint64 `json:"rtt_client_sum" category:"metrics" sub:"delay"` // us + RTTServerSum uint64 `json:"rtt_server_sum" category:"metrics" sub:"delay"` // us + SRTSum uint64 `json:"srt_sum" category:"metrics" sub:"delay"` // us + ARTSum uint64 `json:"art_sum" category:"metrics" sub:"delay"` // us + RRTSum uint64 `json:"rrt_sum" category:"metrics" sub:"delay"` // us + CITSum uint64 `json:"cit_sum" category:"metrics" sub:"delay"` + + RTTCount uint32 `json:"rtt_count" category:"metrics" sub:"delay"` + RTTClientCount uint32 `json:"rtt_client_count" category:"metrics" sub:"delay"` + RTTServerCount uint32 `json:"rtt_server_count" category:"metrics" sub:"delay"` + SRTCount uint32 `json:"srt_count" category:"metrics" sub:"delay"` + ARTCount uint32 `json:"art_count" category:"metrics" sub:"delay"` + RRTCount uint32 `json:"rrt_count" 
category:"metrics" sub:"delay"` + CITCount uint32 `json:"cit_count" category:"metrics" sub:"delay"` } func (_ *Latency) Reverse() { @@ -443,12 +443,12 @@ func (l *Latency) WriteBlock(block *ckdb.Block) { } type Performance struct { - RetransTx uint64 `db:"retrans_tx"` - RetransRx uint64 `db:"retrans_rx"` - ZeroWinTx uint64 `db:"zero_win_tx"` - ZeroWinRx uint64 `db:"zero_win_rx"` - RetransSyn uint32 `db:"retrans_syn"` - RetransSynack uint32 `db:"retrans_synack"` + RetransTx uint64 `json:"retrans_tx" category:"metrics"` + RetransRx uint64 `json:"retrans_rx" category:"metrics"` + ZeroWinTx uint64 `json:"zero_win_tx" category:"metrics"` + ZeroWinRx uint64 `json:"zero_win_rx" category:"metrics"` + RetransSyn uint32 `json:"retrans_syn" category:"metrics"` + RetransSynack uint32 `json:"retrans_synack" category:"metrics"` } func (a *Performance) Reverse() { @@ -537,23 +537,23 @@ func (a *Performance) WriteBlock(block *ckdb.Block) { } type Anomaly struct { - ClientRstFlow uint64 `db:"client_rst_flow"` - ServerRstFlow uint64 `db:"server_rst_flow"` - ServerSynMiss uint64 `db:"server_syn_miss"` - ClientAckMiss uint64 `db:"client_ack_miss"` - ClientHalfCloseFlow uint64 `db:"client_half_close_flow"` - ServerHalfCloseFlow uint64 `db:"server_half_close_flow"` + ClientRstFlow uint64 `json:"client_rst_flow" category:"metrics" sub:"tcp_error"` + ServerRstFlow uint64 `json:"server_rst_flow" category:"metrics" sub:"tcp_error"` + ServerSynMiss uint64 `json:"server_syn_miss" category:"metrics" sub:"tcp_error"` + ClientAckMiss uint64 `json:"client_ack_miss" category:"metrics" sub:"tcp_error"` + ClientHalfCloseFlow uint64 `json:"client_half_close_flow" category:"metrics" sub:"tcp_error"` + ServerHalfCloseFlow uint64 `json:"server_half_close_flow" category:"metrics" sub:"tcp_error"` - ClientSourcePortReuse uint64 `db:"client_source_port_reuse"` - ClientEstablishReset uint64 `db:"client_establish_other_rst"` - ServerReset uint64 `db:"server_reset"` - ServerQueueLack uint64 `db:"server_queue_lack"` - ServerEstablishReset uint64 `db:"server_establish_other_rst"` - TCPTimeout uint64 `db:"tcp_timeout"` + ClientSourcePortReuse uint64 `json:"client_source_port_reuse" category:"metrics" sub:"tcp_error"` + ClientEstablishReset uint64 `json:"client_establish_other_rst" category:"metrics" sub:"tcp_error"` + ServerReset uint64 `json:"server_reset" category:"metrics" sub:"tcp_error"` + ServerQueueLack uint64 `json:"server_queue_lack" category:"metrics" sub:"tcp_error"` + ServerEstablishReset uint64 `json:"server_establish_other_rst" category:"metrics" sub:"tcp_error"` + TCPTimeout uint64 `json:"tcp_timeout" category:"metrics" sub:"tcp_error"` - L7ClientError uint32 `db:"l7_client_error"` - L7ServerError uint32 `db:"l7_server_error"` - L7Timeout uint32 `db:"l7_timeout"` + L7ClientError uint32 `json:"l7_client_error" category:"metrics" sub:"application"` + L7ServerError uint32 `json:"l7_server_error" category:"metrics" sub:"application"` + L7Timeout uint32 `json:"l7_timeout" category:"metrics" sub:"application"` } func (_ *Anomaly) Reverse() { @@ -767,7 +767,7 @@ func (a *Anomaly) WriteBlock(block *ckdb.Block) { } type FlowLoad struct { - Load uint64 `db:"flow_load"` + Load uint64 `json:"flow_load" category:"metrics"` } func (l *FlowLoad) Reverse() { diff --git a/server/libs/flow-metrics/interface.go b/server/libs/flow-metrics/interface.go index f99067975b56..f4078967a6b4 100644 --- a/server/libs/flow-metrics/interface.go +++ b/server/libs/flow-metrics/interface.go @@ -41,7 +41,7 @@ type Meter interface { ToKVString() string 
MarshalTo([]byte) int SortKey() uint64 - Clone() Meter + // Clone() Meter Release() Reverse() ToReversed() Meter diff --git a/server/libs/flow-metrics/tag.go b/server/libs/flow-metrics/tag.go index 735c728df4ef..2908886a9919 100644 --- a/server/libs/flow-metrics/tag.go +++ b/server/libs/flow-metrics/tag.go @@ -252,70 +252,71 @@ type Field struct { // - when the ingester writes to clickhouse, it acts like _id and is serialized as _tid GlobalThreadID uint8 - IP6 net.IP // FIXME: merge IP6 and IP + // structTag "datasource":"n|nm|a|am" means datasource: network, network_map, application, application_map + IP6 net.IP `json:"ip6" map_json:"ip6_0" category:"tag" sub:"network_layer" to_string:"IPv6String"` // FIXME: merge IP6 and IP MAC uint64 - IP uint32 - L3EpcID int32 // (8B) - L3DeviceID uint32 - L3DeviceType DeviceType - RegionID uint16 - SubnetID uint16 - HostID uint16 - PodNodeID uint32 - AZID uint16 - PodGroupID uint32 - PodNSID uint16 - PodID uint32 - PodClusterID uint16 - ServiceID uint32 - AutoInstanceID uint32 - AutoInstanceType uint8 - AutoServiceID uint32 - AutoServiceType uint8 - GPID uint32 + IP uint32 `json:"ip4" map_json:"ip4_0" category:"tag" sub:"network_layer" to_string:"IPv4String"` + L3EpcID int32 `json:"l3_epc_id" map_json:"l3_epc_id_0" category:"tag" sub:"universal_tag"` + L3DeviceID uint32 `json:"l3_device_id" map_json:"l3_device_id_0" category:"tag" sub:"universal_tag"` + L3DeviceType DeviceType `json:"l3_device_type" map_json:"l3_device_type_0" category:"tag" sub:"universal_tag"` + RegionID uint16 `json:"region_id" map_json:"region_id_0" category:"tag" sub:"universal_tag"` + SubnetID uint16 `json:"subnet_id" map_json:"subnet_id_0" category:"tag" sub:"universal_tag"` + HostID uint16 `json:"host_id" map_json:"host_id_0" category:"tag" sub:"universal_tag"` + PodNodeID uint32 `json:"pod_node_id" map_json:"pod_node_id_0" category:"tag" sub:"universal_tag"` + AZID uint16 `json:"az_id" map_json:"az_id_0" category:"tag" sub:"universal_tag"` + PodGroupID uint32 `json:"pod_group_id" map_json:"pod_group_id_0" category:"tag" sub:"universal_tag"` + PodNSID uint16 `json:"pod_ns_id" map_json:"pod_ns_id_0" category:"tag" sub:"universal_tag"` + PodID uint32 `json:"pod_id" map_json:"pod_id_0" category:"tag" sub:"universal_tag"` + PodClusterID uint16 `json:"pod_cluster_id" map_json:"pod_cluster_id_0" category:"tag" sub:"universal_tag"` + ServiceID uint32 `json:"service_id" map_json:"service_id_0" category:"tag" sub:"universal_tag"` + AutoInstanceID uint32 `json:"auto_instance_id" map_json:"auto_instance_id_0" category:"tag" sub:"universal_tag"` + AutoInstanceType uint8 `json:"auto_instance_type" map_json:"auto_instance_type_0" category:"tag" sub:"universal_tag"` + AutoServiceID uint32 `json:"auto_service_id" map_json:"auto_service_id_0" category:"tag" sub:"universal_tag"` + AutoServiceType uint8 `json:"auto_service_type" map_json:"auto_service_type_0" category:"tag" sub:"universal_tag"` + GPID uint32 `json:"gprocess_id" map_json:"gprocess_id_0" category:"tag" sub:"universal_tag"` MAC1 uint64 - IP61 net.IP // FIXME: merge IP61 and IP1 - IP1 uint32 - L3EpcID1 int32 // (8B) - L3DeviceID1 uint32 - L3DeviceType1 DeviceType // (+1B=8B) - RegionID1 uint16 - SubnetID1 uint16 // (8B) - HostID1 uint16 - PodNodeID1 uint32 - AZID1 uint16 - PodGroupID1 uint32 - PodNSID1 uint16 - PodID1 uint32 - PodClusterID1 uint16 - ServiceID1 uint32 - AutoInstanceID1 uint32 - AutoInstanceType1 uint8 - AutoServiceID1 uint32 - AutoServiceType1 uint8 - GPID1 uint32 + IP61 net.IP `json:"ip6_1" category:"tag" sub:"network_layer" to_string:"IPv6String" datasource:"nm|am"` // FIXME: merge IP61 and IP1 + IP1 uint32 `json:"ip4_1" category:"tag" sub:"network_layer" to_string:"IPv4String" datasource:"nm|am"` + L3EpcID1 int32 `json:"l3_epc_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + L3DeviceID1 uint32 `json:"l3_device_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + L3DeviceType1 DeviceType `json:"l3_device_type_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + RegionID1 uint16 `json:"region_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + SubnetID1 uint16 `json:"subnet_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + HostID1 uint16 `json:"host_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + PodNodeID1 uint32 `json:"pod_node_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + AZID1 uint16 `json:"az_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + PodGroupID1 uint32 `json:"pod_group_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + PodNSID1 uint16 `json:"pod_ns_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + PodID1 uint32 `json:"pod_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + PodClusterID1 uint16 `json:"pod_cluster_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + ServiceID1 uint32 `json:"service_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + AutoInstanceID1 uint32 `json:"auto_instance_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + AutoInstanceType1 uint8 `json:"auto_instance_type_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + AutoServiceID1 uint32 `json:"auto_service_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + AutoServiceType1 uint8 `json:"auto_service_type_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` + GPID1 uint32 `json:"gprocess_id_1" category:"tag" sub:"universal_tag" datasource:"nm|am"` ACLGID uint16 - Direction DirectionEnum - Protocol layers.IPProtocol - ServerPort uint16 - VTAPID uint16 + Direction DirectionEnum `json:"role" category:"tag" sub:"capture_info" datasource:"n|a"` + Protocol layers.IPProtocol `json:"protocol" category:"tag" sub:"network_layer" enumfile:"protocol"` + ServerPort uint16 `json:"server_port" category:"tag" sub:"network_layer"` + VTAPID uint16 `json:"agent_id" category:"tag" sub:"capture_info"` // Not stored, only determines which database to store in. // When OrgId is 0 or 1, it is stored in database 'flow_metrics', otherwise stored in '_flow_metrics'. OrgId uint16 TeamID uint16 - TAPPort datatype.TapPort - TAPSide TAPSideEnum - TAPType TAPTypeEnum - IsIPv6 uint8 // (8B) coexists with IP/IP6 + TAPPort datatype.TapPort `json:"tap_port" category:"tag" sub:"capture_info" datasource:"nm|am"` + TAPSide TAPSideEnum `json:"observation_point" category:"tag" sub:"capture_info" enumfile:"observation_point" datasource:"nm|am"` + TAPType TAPTypeEnum `json:"capture_nic_type" category:"tag" sub:"capture_info" enumfile:"capture_nic_type"` + IsIPv4 uint8 `json:"is_ipv4" category:"tag" sub:"network_layer"` // (8B) coexists with IP/IP6 IsKeyService uint8 - L7Protocol datatype.L7Protocol - AppService string - AppInstance string - Endpoint string - BizType uint8 - SignalSource uint16 + L7Protocol datatype.L7Protocol `json:"l7_protocol" category:"tag" sub:"application_layer" enumfile:"l7_protocol" datasource:"a|am"` + AppService string `json:"app_service" category:"tag" sub:"application_layer" datasource:"a|am"` + AppInstance string `json:"app_instance" category:"tag" sub:"application_layer" datasource:"a|am"` + Endpoint string `json:"endpoint" category:"tag" sub:"application_layer" datasource:"a|am"` + BizType uint8 `json:"biz_type" category:"tag" sub:"application_layer" datasource:"a|am"` + SignalSource uint16 `json:"signal_source" category:"tag" sub:"application_layer" enumfile:"l7_signal_source"` // FIXME: network,network_1m should use l4_signal_source for translation TagSource, TagSource1 uint8 @@ -487,7 +488,7 @@ var metricsTableCodes = []Code{ } type Tag struct { - *Field + Field Code id string } @@ -609,7 +610,7 @@ func (t *Tag) MarshalTo(b []byte) int { offset += copy(b[offset:], strconv.FormatUint(uint64(t.HostID1), 10)) } if t.Code&IP != 0 { - if t.IsIPv6 != 0 { + if t.IsIPv4 == 0 { offset += copy(b[offset:], ",ip=") offset += copy(b[offset:], t.IP6.String()) offset += copy(b[offset:], ",ip_version=6") @@ -620,7 +621,7 @@ func (t *Tag) MarshalTo(b []byte) int { } } if t.Code&IPPath != 0 { - if t.IsIPv6 != 0 { + if t.IsIPv4 == 0 { offset += copy(b[offset:], ",ip_0=") offset += copy(b[offset:], t.IP6.String()) offset += copy(b[offset:], ",ip_1=") @@ -1143,7 +1144,7 @@ func (t *Tag) WriteBlock(block *ckdb.Block, time uint32) { if code&IP != 0 { block.WriteIPv4(t.IP) block.WriteIPv6(t.IP6) - block.Write(1 - t.IsIPv6) + block.Write(t.IsIPv4) block.Write(t.TagSource) } if code&IPPath != 0 { @@ -1151,7 +1152,7 @@ func (t *Tag) WriteBlock(block *ckdb.Block, time uint32) { block.WriteIPv4(t.IP) block.WriteIPv4(t.IP1) block.WriteIPv6(t.IP6) block.WriteIPv6(t.IP61) - block.Write(1 - t.IsIPv6) + block.Write(t.IsIPv4) block.Write(t.TagSource) block.Write(t.TagSource1) } @@ -1333,8 +1334,8 @@ func (t *Tag) String() string { func (t *Tag) ReadFromPB(p *pb.MiniTag) { t.Code = Code(p.Code) - t.IsIPv6 = uint8(p.Field.IsIpv6) - if t.IsIPv6 != 0 { + t.IsIPv4 = 1 - uint8(p.Field.IsIpv6) + if t.IsIPv4 == 0 { if t.IP6 == nil { t.IP6 = make([]byte, 16) } @@ -1495,9 +1496,6 @@ func ReleaseTag(tag *Tag) { if tag == nil { return } - if tag.Field != nil { - ReleaseField(tag.Field) - } *tag = Tag{} tagPool.Put(tag) } @@ -1505,7 +1503,7 @@ func ReleaseTag(tag *Tag) { // CloneTag must copy the Field owned by the Tag func CloneTag(tag *Tag) *Tag { newTag := AcquireTag() - newTag.Field = CloneField(tag.Field) + newTag.Field = tag.Field newTag.Code = tag.Code newTag.id = tag.id return newTag @@ -1521,7 +1519,7 @@ func (t *Tag) Release() { func (f *Field) NewTag(c Code) *Tag { tag := AcquireTag() - tag.Field = CloneField(f) + tag.Field = *f tag.Code = c tag.id = "" return tag diff --git a/server/libs/flow-metrics/usage_meter.go 
b/server/libs/flow-metrics/usage_meter.go index 033fdcfb3193..11658413f4bf 100644 --- a/server/libs/flow-metrics/usage_meter.go +++ b/server/libs/flow-metrics/usage_meter.go @@ -24,14 +24,14 @@ import ( ) type UsageMeter struct { - PacketTx uint64 `db:"packet_tx"` - PacketRx uint64 `db:"packet_rx"` - ByteTx uint64 `db:"byte_tx"` - ByteRx uint64 `db:"byte_rx"` - L3ByteTx uint64 `db:"l3_byte_tx"` - L3ByteRx uint64 `db:"l3_byte_rx"` - L4ByteTx uint64 `db:"l4_byte_tx"` - L4ByteRx uint64 `db:"l4_byte_rx"` + PacketTx uint64 `json:"packet_tx" category:"metrics" sub:"NPB"` + PacketRx uint64 `json:"packet_rx" category:"metrics" sub:"NPB"` + ByteTx uint64 `json:"byte_tx" category:"metrics" sub:"NPB"` + ByteRx uint64 `json:"byte_rx" category:"metrics" sub:"NPB"` + L3ByteTx uint64 `json:"l3_byte_tx" category:"metrics" sub:"NPB"` + L3ByteRx uint64 `json:"l3_byte_rx" category:"metrics" sub:"NPB"` + L4ByteTx uint64 `json:"l4_byte_tx" category:"metrics" sub:"NPB"` + L4ByteRx uint64 `json:"l4_byte_rx" category:"metrics" sub:"NPB"` } func (m *UsageMeter) Reverse() { diff --git a/server/libs/utils/utils.go b/server/libs/utils/utils.go index 28d021dc9995..877f6c476f00 100644 --- a/server/libs/utils/utils.go +++ b/server/libs/utils/utils.go @@ -284,3 +284,95 @@ func GetTraceIdIndex(traceId string, indexTypeIsIncremetalId, formatIsHex bool, // the lowest 16 bits are set as the hash value of traceId to reduce duplication when filtering data return num<<16 | (hash & 0xffff), nil } + +func GetValueByOffsetAndKind(ptr, offset uintptr, kind reflect.Kind, fieldName string) interface{} { + fieldAddr := unsafe.Pointer(ptr + offset) + + switch kind { + case reflect.String: + return *(*string)(fieldAddr) + case reflect.Bool: + return *(*bool)(fieldAddr) + case reflect.Int: + return *(*int)(fieldAddr) + case reflect.Int8: + return *(*int8)(fieldAddr) + case reflect.Int16: + return *(*int16)(fieldAddr) + case reflect.Int32: + return *(*int32)(fieldAddr) + case reflect.Int64: + return *(*int64)(fieldAddr) + case reflect.Uint: + return *(*uint)(fieldAddr) + case reflect.Uint8: + return *(*uint8)(fieldAddr) + case reflect.Uint16: + return *(*uint16)(fieldAddr) + case reflect.Uint32: + return *(*uint32)(fieldAddr) + case reflect.Uint64: + return *(*uint64)(fieldAddr) + case reflect.Float32: + return *(*float32)(fieldAddr) + case reflect.Float64: + return *(*float64)(fieldAddr) + // case reflect.Uintptr + // case reflect.Array + // case reflect.Chan + // case reflect.Func + case reflect.Slice: + if strings.Contains(fieldName, "IP6") { + return *(*net.IP)(fieldAddr) + } else if strings.Contains(fieldName, "AttributeNames") || strings.Contains(fieldName, "AttributeValues") || strings.Contains(fieldName, "MetricsNames") { + return *(*[]string)(fieldAddr) + } else if strings.Contains(fieldName, "MetricsValues") { + return *(*[]float64)(fieldAddr) + } + return nil + default: + return nil + } +} + +func ConvertToFloat64(data interface{}) (float64, bool) { + switch v := data.(type) { + case uint: + return float64(v), true + case uint8: + return float64(v), true + case uint16: + return float64(v), true + case uint32: + return float64(v), true + case uint64: + return float64(v), true + case uintptr: + return float64(v), true + case int: + return float64(v), true + case int8: + return float64(v), true + case int16: + return float64(v), true + case int32: + return float64(v), true + case int64: + return float64(v), true + case float64: + return v, true + default: + return 0, false + } +} + +func IsNil(i interface{}) bool { + if i == nil { 
+ return true + } + vi := reflect.ValueOf(i) + if vi.Kind() == reflect.Ptr { + return vi.IsNil() + } + return false +} diff --git a/server/querier/db_descriptions/embed.go b/server/querier/db_descriptions/embed.go new file mode 100644 index 000000000000..50e805a5b94c --- /dev/null +++ b/server/querier/db_descriptions/embed.go @@ -0,0 +1,6 @@ +package db_descriptions + +import "embed" + +//go:embed clickhouse/tag/enum/* +var EnumFiles embed.FS diff --git a/server/server.yaml b/server/server.yaml index d91bcd0111db..12a44e5cf1da 100644 --- a/server/server.yaml +++ b/server/server.yaml @@ -643,33 +643,74 @@ ingester: #flow-tag-cache-flush-timeout: 1800 #exporters: - # enabled: false - # # export data ranges: cbpf-net-span, ebpf-sys-span (app spans from Opentelemetry are not supported) - # export-datas: [cbpf-net-span,ebpf-sys-span] - # # export-data-types ranges: service_info,tracing_info,network_layer,flow_info,client_universal_tag,server_universal_tag,tunnel_info,transport_layer,application_layer,capture_info,native_tag,metrics - # export-data-types: [service_info,tracing_info,network_layer,flow_info,transport_layer,application_layer,metrics] - # export-custom-k8s-labels-regexp: # type string, default: "", means not export custom k8s labels. for example: ".+", means export all custom k8s labels; "aaa|bbb": means export labels which contain 'aaa' or 'bbb' string. ref: https://github.com/google/re2/wiki/Syntax - # export-only-with-traceid: false # if set 'true', if the span has no trace id, it will not be exported - # otlp-exporters: - # - enabled: false - # addr: 127.0.0.1:4317 # grpc protobuf addr, only support protocol 'grpc' - # queue-count: 4 # parallelism of sender - # queue-size: 100000 # size of each exporter queue - # export-datas: [cbpf-net-span,ebpf-sys-span] - # export-data-types: [service_info,tracing_info,network_layer,flow_info,transport_layer,application_layer,metrics] - # export-custom-k8s-labels-regexp: - # export-only-with-traceid: false - # export-batch-count: 32 # ExportRequest contains default(32) ResourceSpans(each l7_flow_log corresponds to a ResourceSpans) - # grpc-headers: # grpc headers, type: map[string]string, default is null, the following is an example configuration - # key1: value1 - # key2: value2 - - #metrics-prom-writer: - # enabled: false - # endpoint: #eg. 
http://1.2.3.4:9091 - # headers: # type: map[string]string, extra http request headers - # batch-size: 2048 # each http request contains how many timeseries - # flush-timeout: 5 - # queue-count: 2 - # queue-size: 1000000 - # metrics-filter: [app] # only support: 'app', means flow_metrics.'vtap_app_edge_port.1s'/'vtap_app_port.1s' +#- protocol: kafka + # endpoints: [broker1.example.com:9092, broker2.example.com:9092] # one address that can be sent to successfully is picked at random; kafka address format, e.g. broker1.example.com:9092 + # data-sources: # $db_name.$table_name + # - flow_log.l7_flow_log + # - flow_log.network.1m + # - flow_log.application_map.1m + # queue-count: 4 + # queue-size: 100000 + # batch-size: 1024 # batch-size may differ per protocol + # flush-timeout: 10 + # tag-omitempty: false # whether to export tags whose value is 0 or "" + # metrics-omitempty: false # whether to export metrics whose value is 0 + # enum-not-convert-to-string: false # whether enum ids are NOT converted to their corresponding strings on export + # universaltag-not-convert-to-string: false # whether universal-tag ids are NOT converted to their corresponding resource names on export + # tag-filters: + # - field-name: signal_source + # operator: "=" + # field-values: [3] # a list longer than 1 is only supported when the operator is IN or NOT IN + # export-fields: # $field_name, etc. + # - "@tag" + # - "@metrics" + # sasl: + # enabled: false # default: false + # security-protocol: SASL_SSL # currently only SASL_SSL is supported + # sasl-mechanism: PLAIN # currently only PLAIN is supported + # username: aaa + # password: bbb +#- protocol: prometheus + # endpoints: [http://127.0.0.1:9091, http://1.1.1.1:9091] # one address that can be sent to successfully is picked at random; prometheus address format, e.g. http://127.0.0.1:9091 + # data-sources: # $db_name.$table_name + # - flow_metrics.application_map.1s + # queue-count: 4 + # queue-size: 100000 + # batch-size: 1024 # batch-size differs per protocol + # flush-timeout: 10 + # tag-filters: + # - field-name: signal_source + # operator: "!=" + # field-values: [10] # a list longer than 1 is only supported when the operator is IN or NOT IN + # export-fields: # $field_name, etc. + # - "@tag" + # - "@metrics" + # extra-headers: # type: map[string]string, extra http request headers + # key1: value1 + # key2: value2 + # tag-omitempty: false # whether to export tags whose value is 0 or "" + # metrics-omitempty: false # whether to export metrics whose value is 0 + # enum-not-convert-to-string: false # whether enum ids are NOT converted to their corresponding strings on export + # universaltag-not-convert-to-string: false # whether universal-tag ids are NOT converted to their corresponding resource names on export +#- protocol: opentelemetry + # endpoints: [127.0.0.1:4317, 1.1.1.1:4317] # one address that can be sent to successfully is picked at random; otlp address format, e.g. 127.0.0.1:4317, only the grpc protocol is supported + # data-sources: # $db_name.$table_name + # - flow_log.l7_flow_log + # queue-count: 4 + # queue-size: 100000 + # batch-size: 32 # batch-size differs per protocol + # flush-timeout: 10 + # tag-filters: + # - field-name: signal_source + # operator: "!=" # =, !=, IN, NOT IN, :, !:, ~, !~ + # field-values: [10] # a list longer than 1 is only supported when the operator is IN or NOT IN + # export-fields: # $field_name, etc. + # - "@tag" + # - "@metrics" + # tag-omitempty: false # whether to export tags whose value is 0 or "" + # metrics-omitempty: false # whether to export metrics whose value is 0 + # enum-not-convert-to-string: false # whether enum ids are NOT converted to their corresponding strings on export + # universaltag-not-convert-to-string: false # whether universal-tag ids are NOT converted to their corresponding resource names on export + # extra-headers: # type: map[string]string, extra http request headers + # key1: value1 + # key2: value2
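
A note on the tag-driven field access that ties this patch together: the json/category/sub struct tags added to the meters and to flow_metrics.Field appear to be what the generic exporters scan to decide which fields to emit, while utils.GetValueByOffsetAndKind (and the Document.GetFieldValueByOffsetAndKind methods) read each selected field per document by a precomputed offset instead of building a reflect.Value every time. The following is a minimal, self-contained sketch of that access pattern under those assumptions; exampleMeter and the wiring in main are hypothetical, and only the offset-and-kind read mirrors the helper introduced in server/libs/utils/utils.go.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// exampleMeter is a hypothetical stand-in for a metrics struct such as
// AppTraffic above, carrying the same style of json/category/sub tags.
type exampleMeter struct {
	Request        uint32 `json:"request" category:"metrics" sub:"throughput"`
	Response       uint32 `json:"response" category:"metrics" sub:"throughput"`
	DirectionScore uint8  `json:"direction_score" category:"metrics" sub:"throughput"`
}

// getValueByOffsetAndKind mirrors the pattern of utils.GetValueByOffsetAndKind
// for the two integer kinds used here: it dereferences base+offset directly,
// avoiding per-field reflect.Value allocations on the per-document hot path.
func getValueByOffsetAndKind(base, offset uintptr, kind reflect.Kind) interface{} {
	addr := unsafe.Pointer(base + offset)
	switch kind {
	case reflect.Uint8:
		return *(*uint8)(addr)
	case reflect.Uint32:
		return *(*uint32)(addr)
	default:
		return nil
	}
}

func main() {
	m := &exampleMeter{Request: 7, Response: 5, DirectionScore: 1}
	t := reflect.TypeOf(*m)
	// An exporter would perform this scan once per struct type (offsets,
	// kinds and tag names are immutable) and cache the result, then reuse
	// the cached (offset, kind) pairs for every document it exports.
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.Tag.Get("category") != "metrics" {
			continue
		}
		v := getValueByOffsetAndKind(uintptr(unsafe.Pointer(m)), f.Offset, f.Type.Kind())
		fmt.Printf("%s (sub=%s) = %v\n", f.Tag.Get("json"), f.Tag.Get("sub"), v)
	}
}

This is presumably also why DocumentFlow, DocumentApp and DocumentUsage now embed DocumentBase and their meters by value, and why Tag holds Field by value rather than by pointer: with one flat allocation per document, a single base pointer plus precomputed offsets can reach every tag and metric field.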