fix regression on invalid n param and
improved log output for GetLog(), and fixed units from µs to s in an internal benchmarking comment ☹
mikebd committed Oct 10, 2023
1 parent 03a41a1 commit ed08b7d
Showing 2 changed files with 10 additions and 5 deletions.
11 changes: 8 additions & 3 deletions controller/rest/v1/logs.go
@@ -25,7 +25,10 @@ func GetLogs(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
 // GetLog handles GET /api/v1/logs/{log} and return the contents in reverse order
 func GetLog(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
 	start := time.Now()
-	defer log.Println(r.URL.RequestURI(), "took", time.Since(start))
+	linesReturned := 0
+	defer func() {
+		log.Println(r.URL.RequestURI(), "returned", linesReturned, "lines in", time.Since(start))
+	}()
 
 	logFilename := p.ByName("log")
 
@@ -41,11 +44,13 @@ func GetLog(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
 		regex = regexCompiled
 	}
 
-	maxLines, _ := util.PositiveIntParamStrict(w, r, config.GetArguments().NumberOfLogLines, "n")
-	if maxLines >= 0 {
+	maxLines, err := util.PositiveIntParamStrict(w, r, config.GetArguments().NumberOfLogLines, "n")
+	if err == nil {
 		logEvents, err := service.GetLog(config.LogDirectory, logFilename, textMatch, regex, maxLines)
 
 		if err == nil {
+			linesReturned = len(logEvents)
+
 			// Reverse the slice - we want the most recent events first.
 			// Tests to see if this is slower than just iterating backwards when rendering
 			// showed that it was not. This is easier to read for maintainability.
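
Aside: the switch in the first hunk from a direct deferred call to a deferred closure is load-bearing. Arguments to a deferred call are evaluated when the defer statement executes, so the old `defer log.Println(..., time.Since(start))` captured the elapsed time immediately and would always log a near-zero duration. A standalone sketch (hypothetical, not from this repository) of the difference:

package main

import (
	"fmt"
	"time"
)

func direct() {
	start := time.Now()
	// Deferred call arguments are evaluated now, so this captures the
	// elapsed time at the defer statement (~0s), not at function return.
	defer fmt.Println("direct:", time.Since(start))
	time.Sleep(100 * time.Millisecond)
}

func viaClosure() {
	start := time.Now()
	// The closure body runs at function return, so time.Since(start)
	// reflects the full duration; later writes to captured variables
	// (like linesReturned in the handler above) are also visible.
	defer func() {
		fmt.Println("closure:", time.Since(start))
	}()
	time.Sleep(100 * time.Millisecond)
}

func main() {
	direct()     // prints a duration in the nanosecond range
	viaClosure() // prints roughly 100ms
}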
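The regression fix itself is in the second hunk: the old code discarded the helper's error with `maxLines, _ :=` and guarded on `maxLines >= 0`, so an invalid `n` query parameter could slip through. Checking `err` instead lets the handler stop as soon as the helper rejects the request. The repository's `util.PositiveIntParamStrict` is not shown in this commit; a hypothetical stand-in, inferred only from the call site, might look like:

package util

import (
	"errors"
	"fmt"
	"net/http"
	"strconv"
)

// positiveIntParamStrict is a hypothetical stand-in for the repository's
// util.PositiveIntParamStrict, inferred from the call site: it returns the
// default when the query parameter is absent, and writes a 400 response and
// returns an error when the value is present but does not parse as a
// non-negative integer. The real helper may differ.
func positiveIntParamStrict(w http.ResponseWriter, r *http.Request, def int, name string) (int, error) {
	raw := r.URL.Query().Get(name)
	if raw == "" {
		return def, nil
	}
	n, err := strconv.Atoi(raw)
	if err != nil || n < 0 {
		msg := fmt.Sprintf("invalid %q parameter: %q", name, raw)
		http.Error(w, msg, http.StatusBadRequest)
		return 0, errors.New(msg)
	}
	return n, nil
}

Under that assumption, the old guard would keep executing the handler even after the helper had already written a 400 response; the new `err == nil` check does not.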
4 changes: 2 additions & 2 deletions service/getLog.go
@@ -27,10 +27,10 @@ func GetLog(directoryPath string, filename string, textMatch string, regex *rege
 	// Consider seek() near the end of the file, backwards iteratively, until the desired number of lines is found.
 	// This will be more efficient for large files, but will be more complex to implement and maintain.
 	// On my machine:
-	// - First scan of a 1GB file with 10.5 million lines takes ≈ 2-3µs returning all (1) lines matching both
+	// - First scan of a 1GB file with 10.5 million lines takes ≈ 2-3s returning all (1) lines matching both
 	//   a textMatch and regex.
 	// - Subsequent scans of the same file for a different textMatch and regex, returning all (1) matching lines,
-	//   takes ≈ 1-1.3µs. This is likely due to the file fitting in the filesystem page cache on my system.
+	//   takes ≈ 1.5s. This is likely due to the file fitting in the filesystem page cache on my system.
 	// kern.vm_page_free_min: 3500
 	// kern.vm_page_free_reserved: 912
 	// kern.vm_page_free_target: 4000
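The comment above floats a seek-backwards optimization for large files. A minimal, hypothetical sketch of that idea (not the current implementation, which scans forward; textMatch/regex filtering omitted for brevity):

package service

import (
	"bytes"
	"os"
)

// tailLines sketches the seek-backwards approach described in the comment:
// read fixed-size chunks from the end of the file until at least maxLines
// lines are in hand, so a large file need not be scanned from the start.
func tailLines(path string, maxLines int) ([]string, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		return nil, err
	}

	const chunkSize = 64 * 1024
	var tail []byte // grows leftwards as earlier chunks are prepended
	offset := info.Size()

	for offset > 0 && bytes.Count(tail, []byte{'\n'}) <= maxLines {
		size := int64(chunkSize)
		if offset < size {
			size = offset
		}
		offset -= size
		chunk := make([]byte, size)
		if _, err := f.ReadAt(chunk, offset); err != nil {
			return nil, err
		}
		tail = append(chunk, tail...)
	}

	lines := bytes.Split(bytes.TrimRight(tail, "\n"), []byte{'\n'})
	if len(lines) > maxLines {
		lines = lines[len(lines)-maxLines:] // drop a possibly partial first line
	}
	out := make([]string, len(lines))
	for i, line := range lines {
		out[i] = string(line)
	}
	return out, nil
}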
