diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8af081d3..6273e70b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,11 @@
 New features:

 - When `--version` is given, the release version is reported on standard output.
+- When `jobs[].from` refers to a URL source, and the server for that URL supports HTTP range requests, files are now
+  downloaded in segments of 512 MiB to avoid overly long connections. Furthermore, if a segmented download fails,
+  swift-http-import is now able to restart the download without having to download the entire file again. Segmented
+  downloading can be disabled, and the segment size can be changed, via the new `jobs[].from.segmenting` and
+  `jobs[].from.segment_bytes` configuration options.

 Changes:

 - When making HTTP requests, the correct User-Agent "swift-http-import/x.y.z" is now reported.
diff --git a/README.md b/README.md
index f81d6a5a..0f549e14 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,8 @@
 * [Usage](#usage)
   * [Source specification](#source-specification)
   * [File selection](#file-selection)
-  * [Transfer behavior: Segmenting](#transfer-behavior-segmenting)
+  * [Transfer behavior: Segmenting on the source side](#transfer-behavior-segmenting-on-the-source-side)
+  * [Transfer behavior: Segmenting on the target side](#transfer-behavior-segmenting-on-the-target-side)
   * [Transfer behavior: Expiring objects](#transfer-behavior-expiring-objects)
 * [Performance](#performance)
 * [Log output](#log-output)
@@ -205,7 +206,39 @@ jobs:
       immutable: '.*\.deb$'
 ```

-### Transfer behavior: Segmenting
+### Transfer behavior: Segmenting on the source side
+
+By default, `swift-http-import` will download source files in segments of at most 512 MiB, using [HTTP range
+requests](https://tools.ietf.org/html/rfc7233). Range requests are supported by most HTTP servers that serve static
+files; servers without support will fall back to regular HTTP and send the whole file at once. **Note that** range
+requests are currently not supported for Swift sources that require authentication.
+
+In the unlikely event that range requests confuse the HTTP server on the source side, they can be disabled by setting
+`jobs[].from.segmenting` to `false`:
+
+```yaml
+jobs:
+  - from:
+      url: http://de.archive.ubuntu.com/ubuntu/
+      segmenting: false
+    to:
+      container: mirror
+      object_prefix: ubuntu-repos
+```
+
+The default segment size of 512 MiB can be changed by setting `jobs[].from.segment_bytes` like so:
+
+```yaml
+jobs:
+  - from:
+      url: http://de.archive.ubuntu.com/ubuntu/
+      segment_bytes: 1073741824 # 1 GiB
+    to:
+      container: mirror
+      object_prefix: ubuntu-repos
+```
+
+### Transfer behavior: Segmenting on the target side

 Swift rejects objects beyond a certain size (usually 5 GiB). To import larger files,
 [segmenting](https://docs.openstack.org/swift/latest/overview_large_objects.html) must be used.
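For context, the range-request handshake that this feature relies on is simple: the client sends a `Range: bytes=<start>-<end>` header; a server that supports it answers 206 (Partial Content) together with a `Content-Range` header, while a server that does not simply answers 200 with the whole body. A minimal, self-contained sketch of that probe (standalone illustration, not part of this change; the URL is a placeholder):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	//ask for the first 1024 bytes only
	req, err := http.NewRequest("GET", "http://example.com/some/large/file", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Range", "bytes=0-1023")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusPartialContent: //206: the server honored the range
		fmt.Println("range supported:", resp.Header.Get("Content-Range"))
	case http.StatusOK: //200: the server ignored the range and sent the whole file
		fmt.Println("range not supported, full body follows")
	}
}
```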
The configuration
diff --git a/pkg/objects/file.go b/pkg/objects/file.go
index 75489799..2de52dc9 100644
--- a/pkg/objects/file.go
+++ b/pkg/objects/file.go
@@ -110,9 +110,12 @@ func (f File) PerformTransfer() TransferResult {
 	//retrieve object from source, taking advantage of Etag and Last-Modified where possible
 	metadata := headers.ObjectMetadata()
-	targetState := FileState{
-		Etag:         metadata["source-etag"],
-		LastModified: metadata["source-last-modified"],
+	requestHeaders := make(map[string]string)
+	if val := metadata["source-etag"]; val != "" {
+		requestHeaders["If-None-Match"] = val
+	}
+	if val := metadata["source-last-modified"]; val != "" {
+		requestHeaders["If-Modified-Since"] = val
 	}

 	var (
@@ -120,10 +123,10 @@ func (f File) PerformTransfer() TransferResult {
 		sourceState FileState
 	)
 	if f.Spec.Contents == nil {
-		body, sourceState, err = f.Job.Source.GetFile(f.Spec.Path, targetState)
+		body, sourceState, err = f.Job.Source.GetFile(f.Spec.Path, requestHeaders)
 	} else {
 		util.Log(util.LogDebug, "using cached contents for %s", f.Spec.Path)
-		body, sourceState, err = f.Spec.toTransferFormat(targetState)
+		body, sourceState, err = f.Spec.toTransferFormat(requestHeaders)
 	}
 	if err != nil {
 		util.Log(util.LogError, err.Error())
@@ -170,7 +173,12 @@ func (f File) PerformTransfer() TransferResult {
 	return TransferFailed
 }

-func (s FileSpec) toTransferFormat(targetState FileState) (io.ReadCloser, FileState, error) {
+func (s FileSpec) toTransferFormat(requestHeaders map[string]string) (io.ReadCloser, FileState, error) {
+	targetState := FileState{
+		Etag:         requestHeaders["If-None-Match"],
+		LastModified: requestHeaders["If-Modified-Since"],
+	}
+
 	sourceState := FileState{
 		Etag:         s.Headers.Get("Etag"),
 		LastModified: s.Headers.Get("Last-Modified"),
diff --git a/pkg/objects/source.go b/pkg/objects/source.go
index 721b4ec6..9b555e4f 100644
--- a/pkg/objects/source.go
+++ b/pkg/objects/source.go
@@ -30,7 +30,6 @@ import (
 	"path"
 	"path/filepath"
 	"regexp"
-	"strconv"
 	"strings"
 	"time"

@@ -55,8 +54,9 @@ type Source interface {
 	//none for files.
 	ListEntries(directoryPath string) ([]FileSpec, *ListEntriesError)
 	//GetFile retrieves the contents and metadata for the file at the given path
-	//in the source.
-	GetFile(directoryPath string, targetState FileState) (body io.ReadCloser, sourceState FileState, err error)
+	//in the source. The `headers` map contains additional HTTP request headers
+	//that shall be passed to the source in the GET request.
+	GetFile(directoryPath string, headers map[string]string) (body io.ReadCloser, sourceState FileState, err error)
 }

 //ListEntriesError is an error that occurs while scraping a directory.
@@ -95,6 +95,10 @@ type URLSource struct {
 	ClientCertificateKeyPath string       `yaml:"key"`
 	ServerCAPath             string       `yaml:"ca"`
 	HTTPClient               *http.Client `yaml:"-"`
+	//transfer options
+	SegmentingIn *bool  `yaml:"segmenting"`
+	Segmenting   bool   `yaml:"-"`
+	SegmentSize  uint64 `yaml:"segment_bytes"`
 	//NOTE: All attributes that can be deserialized from YAML also need to be in
 	//the YumSource with the same YAML field names.
 }
@@ -135,6 +139,15 @@ func (u *URLSource) Validate(name string) (result []error) {
 		}
 	}

+	if u.SegmentingIn == nil {
+		u.Segmenting = true
+	} else {
+		u.Segmenting = *u.SegmentingIn
+	}
+	if u.SegmentSize == 0 {
+		u.SegmentSize = 512 << 20 //default: 512 MiB
+	}
+
 	return
 }

@@ -289,28 +302,31 @@ func (u URLSource) ListEntries(directoryPath string) ([]FileSpec, *ListEntriesEr
 }

 //GetFile implements the Source interface.
-func (u URLSource) GetFile(directoryPath string, targetState FileState) (io.ReadCloser, FileState, error) {
+func (u URLSource) GetFile(directoryPath string, requestHeaders map[string]string) (io.ReadCloser, FileState, error) {
 	uri := u.getURLForPath(directoryPath).String()
-
-	//prepare request to retrieve from source, taking advantage of Etag and
-	//Last-Modified where possible
-	req, err := http.NewRequest("GET", uri, nil)
-	if err != nil {
-		return nil, FileState{}, fmt.Errorf("skipping %s: GET failed: %s", uri, err.Error())
-	}
-	if targetState.Etag != "" {
-		req.Header.Set("If-None-Match", targetState.Etag)
-	}
-	if targetState.LastModified != "" {
-		req.Header.Set("If-Modified-Since", targetState.LastModified)
-	}
-	req.Header.Set("User-Agent", "swift-http-import/"+util.Version)
+	requestHeaders["User-Agent"] = "swift-http-import/" + util.Version

 	//retrieve file from source
-	response, err := u.HTTPClient.Do(req)
+	var (
+		response *http.Response
+		err      error
+	)
+	if u.Segmenting {
+		response, err = util.EnhancedGet(u.HTTPClient, uri, requestHeaders, u.SegmentSize)
+	} else {
+		var req *http.Request
+		req, err = http.NewRequest("GET", uri, nil) //plain assignment: `:=` would shadow `err` and hide request errors
+		if err == nil {
+			for key, val := range requestHeaders {
+				req.Header.Set(key, val)
+			}
+			response, err = u.HTTPClient.Do(req)
+		}
+	}
 	if err != nil {
 		return nil, FileState{}, fmt.Errorf("skipping %s: GET failed: %s", uri, err.Error())
 	}
+
 	if response.StatusCode != 200 && response.StatusCode != 304 {
 		return nil, FileState{}, fmt.Errorf(
 			"skipping %s: GET returned unexpected status code: expected 200 or 304, but got %d",
@@ -318,20 +334,10 @@ func (u URLSource) GetFile(directoryPath string, targetState FileState) (io.Read
 		)
 	}

-	//read Content-Length header (or report -1, i.e. unknown size, if header missing or corrupted)
-	var sizeBytes int64 = -1
-	if sizeBytesStr := response.Header.Get("Content-Length"); sizeBytesStr != "" {
-		sizeBytes, err = strconv.ParseInt(sizeBytesStr, 10, 64)
-		if err != nil {
-			util.Log(util.LogError, "invalid header \"Content-Length: %s\" in GET %s", sizeBytesStr, uri)
-			sizeBytes = -1
-		}
-	}
-
 	return response.Body, FileState{
 		Etag:         response.Header.Get("Etag"),
 		LastModified: response.Header.Get("Last-Modified"),
-		SizeBytes:    sizeBytes,
+		SizeBytes:    response.ContentLength,
 		ExpiryTime:   nil, //no way to get this information via HTTP only
 		SkipTransfer: response.StatusCode == 304,
 		ContentType:  response.Header.Get("Content-Type"),
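The `requestHeaders` map that now flows through `GetFile()` carries the conditional headers built in `PerformTransfer()` (see the `file.go` hunk above): a stored ETag becomes `If-None-Match`, a stored Last-Modified becomes `If-Modified-Since`, and a 304 reply is what sets `SkipTransfer` in the returned `FileState`. A sketch of that revalidation round trip, with a placeholder URL and ETag value:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	//placeholder URL and ETag; in swift-http-import both come from the target
	//object's source-etag/source-last-modified metadata
	req, err := http.NewRequest("GET", "http://example.com/ubuntu/ls-lR.gz", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("If-None-Match", `"5d8c72a5edda8d6a"`)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotModified {
		fmt.Println("304: file unchanged, transfer can be skipped")
	}
}
```

diff --git a/pkg/objects/swift.go b/pkg/objects/swift.go
index 73553e0e..bdb5b7ca 100644
--- a/pkg/objects/swift.go
+++ b/pkg/objects/swift.go
@@ -185,18 +185,12 @@ func (s *SwiftLocation) ListEntries(path string) ([]FileSpec, *ListEntriesError)
 }

 //GetFile implements the Source interface.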
-func (s *SwiftLocation) GetFile(path string, targetState FileState) (io.ReadCloser, FileState, error) {
+func (s *SwiftLocation) GetFile(path string, requestHeaders map[string]string) (io.ReadCloser, FileState, error) {
 	objectPath := filepath.Join(s.ObjectNamePrefix, path)

-	reqHeaders := make(swift.Headers)
-	if targetState.Etag != "" {
-		reqHeaders["If-None-Match"] = targetState.Etag
-	}
-	if targetState.LastModified != "" {
-		reqHeaders["If-Modified-Since"] = targetState.LastModified
-	}
-
-	body, respHeaders, err := s.Connection.ObjectOpen(s.ContainerName, objectPath, false, reqHeaders)
+	body, respHeaders, err := s.Connection.ObjectOpen(
+		s.ContainerName, objectPath, false, swift.Headers(requestHeaders),
+	)

 	switch err {
 	case nil:
diff --git a/pkg/objects/yum.go b/pkg/objects/yum.go
index b6ea65d6..5c533efb 100644
--- a/pkg/objects/yum.go
+++ b/pkg/objects/yum.go
@@ -68,8 +68,8 @@ func (s *YumSource) ListEntries(directoryPath string) ([]FileSpec, *ListEntriesE
 }

 //GetFile implements the Source interface.
-func (s *YumSource) GetFile(directoryPath string, targetState FileState) (body io.ReadCloser, sourceState FileState, err error) {
-	return s.urlSource.GetFile(directoryPath, targetState)
+func (s *YumSource) GetFile(directoryPath string, requestHeaders map[string]string) (body io.ReadCloser, sourceState FileState, err error) {
+	return s.urlSource.GetFile(directoryPath, requestHeaders)
 }

 //ListAllFiles implements the Source interface.
diff --git a/pkg/util/http.go b/pkg/util/http.go
new file mode 100644
index 00000000..dc8084f0
--- /dev/null
+++ b/pkg/util/http.go
@@ -0,0 +1,310 @@
+/*******************************************************************************
+*
+* Copyright 2018 SAP SE
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You should have received a copy of the License along with this
+* program. If not, you may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+*******************************************************************************/
+
+package util
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"regexp"
+	"strconv"
+)
+
+const (
+	maxTotalRetryCount = 10
+	maxRetryCount      = 3
+)
+
+//EnhancedGet is like http.Client.Get(), but recognizes if the HTTP server
+//understands range requests, and downloads the file in segments in that case.
+func EnhancedGet(client *http.Client, uri string, requestHeaders map[string]string, segmentBytes uint64) (*http.Response, error) {
+	d := downloader{
+		Client:         client,
+		URI:            uri,
+		RequestHeaders: requestHeaders,
+		SegmentBytes:   int64(segmentBytes),
+		BytesTotal:     -1,
+	}
+
+	//make initial HTTP request that detects whether the source server supports
+	//range requests
+	resp, headers, err := d.getNextChunk()
+	//if we receive a 416 (Requested Range Not Satisfiable), the most likely cause
+	//is that the object is 0 bytes long, so even byte index 0 is already over
+	//EOF -> fall back to a plain HTTP request in this case
+	if resp != nil && resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
+		Log(LogInfo, "received status code 416 -> falling back to plain GET for %s", uri)
+		resp.Body.Close()
+		req, err := d.buildRequest()
+		if err != nil {
+			return nil, err
+		}
+		return d.Client.Do(req)
+	}
+
+	//we're done here unless we receive a 206 (Partial Content) response; possible causes:
+	//  * resp.StatusCode == 200 (server does not understand Range header)
+	//  * resp.StatusCode == 304 (target is already up-to-date)
+	//  * resp.StatusCode >= 400 (other error)
+	if err != nil || resp.StatusCode != http.StatusPartialContent {
+		return resp, err
+	}
+
+	//actual response is 206, but we simulate the behavior of a 200 response
+	resp.Status = "200 OK"
+	resp.StatusCode = 200
+
+	//if the current chunk contains the entire file, we do not need to hijack the body
+	if d.BytesTotal > 0 && headers.ContentRangeLength == d.BytesTotal {
+		return resp, err
+	}
+
+	//return the original response, but:
+	//1. hijack the response body so that the next segments will be loaded once
+	//the current response body is exhausted (the `struct downloader` is wrapped
+	//into a util.FullReader because ncw/swift seems to get confused when Read()
+	//does not fill the provided buffer completely)
+	d.Reader = resp.Body
+	resp.Body = &FullReader{&d}
+	//2. report the total file size in the response, so that the caller can
+	//decide whether to PUT this directly or as a large object
+	if d.BytesTotal > 0 {
+		resp.ContentLength = d.BytesTotal
+	}
+	return resp, err
+}
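From the caller's side (see `URLSource.GetFile` above), `EnhancedGet()` behaves like a plain `http.Client.Get()`: the segmentation hides entirely behind `resp.Body`, and `resp.ContentLength` reports the whole file rather than just the first segment. A hypothetical caller, assuming the repository's canonical import path and a placeholder URL:

```go
package main

import (
	"io"
	"net/http"
	"os"

	"github.com/sapcc/swift-http-import/pkg/util" //assumed canonical import path
)

func main() {
	//placeholder URL; 512 MiB segments, matching the default configuration
	resp, err := util.EnhancedGet(http.DefaultClient, "http://example.com/big.iso", map[string]string{}, 512<<20)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	//reading resp.Body transparently fetches follow-up segments as needed
	_, err = io.Copy(os.Stdout, resp.Body)
	if err != nil {
		panic(err)
	}
}
```

+
+//downloader is an io.ReadCloser that downloads a file from a Source in
+//segments by using the HTTP request parameter "Range" [RFC 7233].
+//
+//A downloader is usually constructed by EnhancedGet(), which probes the server
+//for Range support and otherwise falls back on downloading the entire file at
+//once.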
+type downloader struct {
+	//the original arguments to EnhancedGet()
+	Client         *http.Client
+	URI            string
+	RequestHeaders map[string]string
+	SegmentBytes   int64
+	//this object's internal state
+	Etag       string        //we track the source URL's Etag to detect changes mid-transfer
+	BytesRead  int64         //how many bytes have already been read out of this Reader
+	BytesTotal int64         //the total Content-Length of the file, or -1 if not known
+	Reader     io.ReadCloser //current, not-yet-exhausted response.Body, or nil after EOF or error
+	Err        error         //non-nil after error
+	//retry handling
+	LastErrorAtBytesRead int64 //at which offset the last read error occurred
+	LastErrorRetryCount  int   //retry counter for said offset, or 0 if no error occurred before
+	TotalRetryCount      int   //retry counter for all read errors encountered at any offset
+}
+
+type parsedResponseHeaders struct {
+	ContentRangeStart  int64
+	ContentRangeLength int64
+}
+
+func (d *downloader) buildRequest() (*http.Request, error) {
+	req, err := http.NewRequest("GET", d.URI, nil)
+	if err != nil {
+		return nil, err
+	}
+	for key, val := range d.RequestHeaders {
+		req.Header.Set(key, val)
+	}
+	return req, nil
+}
+
+func (d *downloader) getNextChunk() (*http.Response, *parsedResponseHeaders, error) {
+	req, err := d.buildRequest()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	//add Range header
+	start := d.BytesRead
+	end := start + d.SegmentBytes
+	if d.BytesTotal > 0 && end > d.BytesTotal {
+		end = d.BytesTotal
+	}
+	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end-1)) //end index is inclusive!
+
+	//execute request
+	resp, err := d.Client.Do(req)
+	if err != nil {
+		return resp, nil, err
+	}
+
+	//check Etag
+	etag := resp.Header.Get("Etag")
+	if d.Etag == "" {
+		d.Etag = etag
+	} else if d.Etag != etag {
+		resp.Body.Close()
+		return nil, nil, fmt.Errorf("Etag has changed mid-transfer: %q -> %q", d.Etag, etag)
+	}
+
+	//parse Content-Range header
+	var headers parsedResponseHeaders
+	if resp.StatusCode == 206 {
+		contentRange := resp.Header.Get("Content-Range")
+		start, stop, total, ok := parseContentRange(contentRange)
+		if !ok {
+			resp.Body.Close()
+			return nil, nil, errors.New("malformed response header: Content-Range: " + contentRange)
+		}
+		headers.ContentRangeStart = start
+		headers.ContentRangeLength = stop - start + 1 //stop index is inclusive!
+		if d.BytesTotal == -1 {
+			d.BytesTotal = total
+		} else if d.BytesTotal != total {
+			resp.Body.Close()
+			return nil, nil, fmt.Errorf("Content-Length has changed mid-transfer: %d -> %d", d.BytesTotal, total)
+		}
+	}
+
+	return resp, &headers, nil
+}
+
+//Matches values of Content-Range response header [RFC 7233, section 4.2] for
+//successful range responses (i.e. HTTP status code 206, not 416).
+var contentRangeRx = regexp.MustCompile(`^bytes ([0-9]+)-([0-9]+)/(\*|[0-9]+)$`)
+
+func parseContentRange(headerValue string) (start, stop, total int64, ok bool) {
+	match := contentRangeRx.FindStringSubmatch(headerValue)
+	if match == nil {
+		return 0, 0, 0, false
+	}
+
+	var err error
+	start, err = strconv.ParseInt(match[1], 10, 64)
+	if err != nil {
+		return 0, 0, 0, false
+	}
+	stop, err = strconv.ParseInt(match[2], 10, 64)
+	if err != nil || stop < start {
+		return 0, 0, 0, false
+	}
+	total = -1
+	if match[3] != "*" {
+		total, err = strconv.ParseInt(match[3], 10, 64)
+		if err != nil {
+			return 0, 0, 0, false
+		}
+	}
+	return start, stop, total, true
+}
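A worked example of the grammar that `parseContentRange()` accepts: for a 70-byte file fetched in 20-byte chunks, the second chunk comes back as `Content-Range: bytes 20-39/70`, i.e. start 20, stop 39 (inclusive), total 70, so `ContentRangeLength` = 39 - 20 + 1 = 20. A test-style sketch (hypothetical; assumes it sits next to `http.go` in package `util`):

```go
package util

import "testing"

func TestParseContentRange(t *testing.T) {
	//a regular mid-file chunk: start/stop are inclusive byte indices
	start, stop, total, ok := parseContentRange("bytes 20-39/70")
	if !ok || start != 20 || stop != 39 || total != 70 {
		t.Errorf("unexpected result: %d %d %d %v", start, stop, total, ok)
	}
	//a server that does not know the total size sends "*"; total is then -1
	if _, _, total, ok = parseContentRange("bytes 0-499/*"); !ok || total != -1 {
		t.Errorf("expected ok with total -1, got %d (ok=%v)", total, ok)
	}
	//the 416 form "bytes */70" and other malformed values must be rejected
	if _, _, _, ok = parseContentRange("bytes */70"); ok {
		t.Error("expected malformed header to be rejected")
	}
}
```

+
+//Read implements the io.ReadCloser interface.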
+func (d *downloader) Read(buf []byte) (int, error) {
+	//if we don't have a response body, we're at EOF
+	if d.Reader == nil {
+		return 0, io.EOF
+	}
+
+	//read from the current response body
+	bytesRead, err := d.Reader.Read(buf)
+	d.BytesRead += int64(bytesRead)
+	switch err {
+	case nil:
+		return bytesRead, err
+	case io.EOF:
+		//current response body is EOF -> close it
+		err = d.Reader.Close()
+		d.Reader = nil
+		if err != nil {
+			return bytesRead, err
+		}
+	default:
+		//unexpected read error
+		if !d.shouldRetry() {
+			return bytesRead, err
+		}
+		Log(LogError, "restarting GET %s after read error at offset %d: %s",
+			d.URI, d.BytesRead, err.Error(),
+		)
+		err := d.Reader.Close()
+		if err != nil {
+			Log(LogError,
+				"encountered additional error when trying to close the existing reader: %s",
+				err.Error(),
+			)
+		}
+		d.Reader = nil
+	}
+
+	//is there a next chunk?
+	if d.BytesRead == d.BytesTotal {
+		return bytesRead, io.EOF
+	}
+
+	//get next chunk
+	resp, headers, err := d.getNextChunk()
+	if err != nil {
+		return bytesRead, err
+	}
+	if headers.ContentRangeStart != d.BytesRead {
+		resp.Body.Close()
+		return bytesRead, fmt.Errorf(
+			"expected next segment to start at offset %d, but it starts at %d",
+			d.BytesRead, headers.ContentRangeStart,
+		)
+	}
+	d.Reader = resp.Body
+	return bytesRead, nil
+}
+
+//Close implements the io.ReadCloser interface.
+func (d *downloader) Close() error {
+	if d.Reader != nil {
+		return d.Reader.Close()
+	}
+	return nil
+}
+
+//shouldRetry is called when a read error is encountered, and decides whether
+//to retry or not.
+func (d *downloader) shouldRetry() bool {
+	//never restart transfer of the same file more than 10 times
+	d.TotalRetryCount++
+	if d.TotalRetryCount > maxTotalRetryCount {
+		Log(LogInfo, "giving up on GET %s after %d read errors", d.URI, maxTotalRetryCount)
+		return false
+	}
+
+	//if there was no error at this offset before, always retry
+	if d.LastErrorAtBytesRead != d.BytesRead {
+		d.LastErrorAtBytesRead = d.BytesRead
+		d.LastErrorRetryCount = 0
+		return true
+	}
+
+	//only retry an error at the same offset 3 times
+	d.LastErrorRetryCount++
+	if d.LastErrorRetryCount > maxRetryCount {
+		Log(LogInfo, "giving up on GET %s after %d read errors at the same offset (%d)",
+			d.URI,
+			maxRetryCount,
+			d.LastErrorAtBytesRead,
+		)
+		return false
+	}
+	return true
+}
diff --git a/pkg/util/io.go b/pkg/util/io.go
new file mode 100644
index 00000000..7006d26e
--- /dev/null
+++ b/pkg/util/io.go
@@ -0,0 +1,46 @@
+/*******************************************************************************
+*
+* Copyright 2018 SAP SE
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You should have received a copy of the License along with this
+* program. If not, you may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+*******************************************************************************/
+
+package util
+
+import "io"
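`FullReader` gives essentially the same guarantee as the standard library's `io.ReadFull()`: short reads happen only at EOF or on error (though a short final read surfaces here as `io.EOF` rather than `io.ErrUnexpectedEOF`). A self-contained demonstration using a deliberately chunky reader; only the test input is made up:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"
	"testing/iotest"

	"github.com/sapcc/swift-http-import/pkg/util" //assumed canonical import path
)

func main() {
	//OneByteReader yields a single byte per Read() call; a plain Read() into
	//the 10-byte buffer below would return n=1, but FullReader loops until
	//the buffer is full
	chunky := ioutil.NopCloser(iotest.OneByteReader(strings.NewReader("0123456789abcdef")))
	r := util.FullReader{Base: chunky}

	buf := make([]byte, 10)
	n, err := r.Read(buf)
	fmt.Println(n, err, string(buf)) //prints: 10 <nil> 0123456789
}
```

+
+//FullReader is an io.ReadCloser whose Read() implementation always fills the read
+//buffer as much as possible by calling Base.Read() repeatedly.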
+type FullReader struct {
+	Base io.ReadCloser
+}
+
+//Read implements the io.Reader interface.
+func (r *FullReader) Read(buf []byte) (int, error) {
+	numRead := 0
+	for numRead < len(buf) {
+		n, err := r.Base.Read(buf[numRead:])
+		numRead += n
+		if err != nil { //including io.EOF
+			return numRead, err
+		}
+	}
+	return numRead, nil
+}
+
+//Close implements the io.Closer interface.
+func (r *FullReader) Close() error {
+	return r.Base.Close()
+}
diff --git a/tests.sh b/tests.sh
index 00972eef..d9eb0291 100755
--- a/tests.sh
+++ b/tests.sh
@@ -82,7 +82,8 @@ if [ "$1" = swift ]; then
   SOURCE_SPEC="{ container: \"${CONTAINER_PUBLIC}\", object_prefix: \"${DISAMBIGUATOR}\", ${AUTH_PARAMS} }"
 else
   # get public HTTP URL for container
-  SOURCE_SPEC="{ url: \"$(swift stat -v "${CONTAINER_PUBLIC}" | awk '$1=="URL:"{print$2}')/${DISAMBIGUATOR}/\" }"
+  SOURCE_URL="$(swift stat -v "${CONTAINER_PUBLIC}" | awk '$1=="URL:"{print$2}')/${DISAMBIGUATOR}/"
+  SOURCE_SPEC="{ url: \"${SOURCE_URL}\" }"
 fi

 ################################################################################
@@ -266,7 +267,7 @@ Hello Another World.
 EOF

 ################################################################################
-step 'Test 6: Segmenting of large files'
+step 'Test 6: Segmented upload of large files'

 upload_file_from_stdin largefile.txt <<-EOF
 	Line number 1
@@ -316,14 +317,14 @@ fi
 ################################################################################
 step 'Test 7: Object expiration'

-if [ "$1" = http ]; then
-  echo ">> Test skipped (works only with Swift source)."
-else
-
 upload_file_from_stdin expires.txt -H 'X-Delete-At: 2000000000' <<-EOF
 	This will expire soon.
 EOF

+if [ "$1" = http ]; then
+  echo ">> Test skipped (works only with Swift source)."
+else
+
 mirror <<-EOF
 	swift: { $AUTH_PARAMS }
 	jobs:
@@ -347,6 +348,60 @@ fi

 fi # end of: if [ "$1" = http ]

+################################################################################
+step 'Test 8: Segmented download'
+
+# This test specifically checks that segmented upload works correctly when a
+# file is downloaded in segments. There was a bug where EnhancedGet() reported
+# the Content-Length of the first segment only (instead of the whole file),
+# causing the segmenting logic to incorrectly determine when to upload as a
+# large object.
+
+if [ "$1" = swift ]; then
+  echo ">> Test skipped (works only with HTTP source)."
+else
+
+mirror <<-EOF
+	swift: { $AUTH_PARAMS }
+	jobs:
+	  - from:
+	      url: ${SOURCE_URL}
+	      segment_bytes: 20 # less than job.segmenting.min_bytes, but also more
+	                        # than the smallest files (to exercise all code paths)
+	    to: { container: ${CONTAINER_BASE}-test8 }
+	    segmenting:
+	      container: ${CONTAINER_BASE}-test8-segments
+	      min_bytes: 30
+	      segment_bytes: 14
+EOF
+# NOTE: Each line "Line number N" is 13 characters plus a newline, i.e. 14 bytes,
+# so largefile.txt is 5 x 14 = 70 bytes. That exceeds min_bytes (30), so it is
+# uploaded as a large object, and an upload segment size of 14 bytes puts each
+# line of text in its own segment, i.e. 5 segments. (On the download side,
+# segment_bytes: 20 means largefile.txt is fetched in four range requests of
+# 20+20+20+10 bytes.)
+
+expect test8 <<-EOF
+	>> expires.txt
+	This will expire soon.
+	>> just/another/file.txt
+	This is the new file!
+	>> just/some/files/1.txt
+	Hello World.
+	>> just/some/files/2.txt
+	Hello Second World.
+	>> largefile.txt
+	Line number 1
+	Line number 2
+	Line number 3
+	Line number 4
+	Line number 5
+EOF
+
+SEGMENT_COUNT="$(swift list ${CONTAINER_BASE}-test8-segments | wc -l)"
+if [ "${SEGMENT_COUNT}" -ne 5 ]; then
+  echo -e "\e[1;31m>>\e[0;31m Expected SLO to have 5 segments, but got ${SEGMENT_COUNT} instead:\e[0m"
+  dump test8-segments
+  exit 1
+fi
+
+fi # end of: if [ "$1" = swift ]
+
 ################################################################################
 # cleanup before exiting
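A closing note on the harness: judging from the guards above, the suite runs once per source type (`./tests.sh http` and `./tests.sh swift`), and the new Test 8 is exercised only in HTTP mode, since segmented download applies to URL sources only.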