Bugfix: MIME types were not set correctly for SVG files. (#3768)
Adds basic MIME type identification for notebook uploads.

Replaced the VQL binary parser in the Linux journal artifacts with the native journald parser, and added a Linux.Events.Journal event artifact.

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: snyk-bot <snyk-bot@snyk.io>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
3 people authored Sep 21, 2024
1 parent 6314375 commit 5f69d41
Showing 12 changed files with 565 additions and 644 deletions.
31 changes: 17 additions & 14 deletions api/download.go
@@ -26,7 +26,6 @@
package api

import (
"bytes"
"fmt"
"html"
"io"
@@ -225,7 +224,7 @@ func vfsFileDownloadHandler() http.Handler {
w.Header().Set("Content-Disposition", "attachment; "+
sanitizeFilenameForAttachment(filename))
w.Header().Set("Content-Type",
detectMime(output, request.DetectMime))
utils.GetMimeString(output, utils.AutoDetectMime(request.DetectMime)))
w.Header().Set("Content-Range",
fmt.Sprintf("bytes %d-%d/%d", request.Offset, next_offset, total_size))
w.WriteHeader(200)
@@ -288,7 +287,8 @@ func vfsFileDownloadHandler() http.Handler {
w.Header().Set("Content-Disposition", "attachment; "+
sanitizeFilenameForAttachment(filename))
w.Header().Set("Content-Type",
detectMime(buf[:n], request.DetectMime))
utils.GetMimeString(buf[:n],
utils.AutoDetectMime(request.DetectMime)))
w.WriteHeader(200)
headers_sent = true
}
@@ -369,16 +369,6 @@ func filterData(reader_at io.ReaderAt,
return output, offset, nil
}

func detectMime(buffer []byte, detect_mime bool) string {
if detect_mime && len(buffer) > 8 {
if 0 == bytes.Compare(
[]byte("\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"), buffer[:8]) {
return "image/png"
}
}
return "binary/octet-stream"
}

func getRows(
ctx context.Context,
config_obj *config_proto.Config,
@@ -507,14 +497,27 @@ func downloadFileStore(prefix []string) http.Handler {
return
}

buf := pool.Get().([]byte)
defer pool.Put(buf)

// Read the first buffer for mime detection.
n, err := fd.Read(buf)
if err != nil {
returnError(w, 404, err.Error())
return
}

// From here on we already sent the headers and we can
// not really report an error to the client.
w.Header().Set("Content-Disposition", "attachment; "+
sanitizePathspecForAttachment(path_spec))

w.Header().Set("Content-Type", "binary/octet-stream")
w.Header().Set("Content-Type",
utils.GetMimeString(buf[:n], utils.AutoDetectMime(true)))
w.WriteHeader(200)
w.Write(buf[:n])

// Copy the rest directly.
utils.Copy(r.Context(), w, fd)
})
}
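
The reworked downloadFileStore handler above reads a single buffer up front so the Content-Type can be sniffed, writes the headers, and only then streams the remainder of the file. A minimal standalone sketch of the same pattern, using the standard library's http.DetectContentType and io.Copy rather than the project's utils helpers (serveFile and example.bin are illustrative names, not part of this commit):

```go
package main

import (
	"io"
	"net/http"
	"os"
)

// serveFile is an illustrative handler following the pattern in the diff:
// read one buffer first so the Content-Type can be sniffed, send the headers,
// then stream the remainder of the file.
func serveFile(w http.ResponseWriter, path string) {
	fd, err := os.Open(path)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	defer fd.Close()

	buf := make([]byte, 32*1024)
	n, err := fd.Read(buf)
	if err != nil && err != io.EOF {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}

	// After WriteHeader we can no longer report an error to the client, so
	// anything that can fail early happens above this point.
	w.Header().Set("Content-Disposition", "attachment")
	w.Header().Set("Content-Type", http.DetectContentType(buf[:n]))
	w.WriteHeader(http.StatusOK)
	w.Write(buf[:n])

	// Copy the rest of the file directly to the response.
	io.Copy(w, fd)
}

func main() {
	http.HandleFunc("/download", func(w http.ResponseWriter, r *http.Request) {
		serveFile(w, "example.bin") // placeholder path
	})
	http.ListenAndServe("127.0.0.1:8080", nil)
}
```

The constraint driving this shape is the one noted in the diff's comment: once the headers have been written, a later read error can no longer be reported to the client, so the first read happens before WriteHeader.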
2 changes: 0 additions & 2 deletions api/notebooks.go
@@ -467,8 +467,6 @@ func (self *ApiServer) UploadNotebookAttachment(
if err != nil {
return nil, Status(self.verbose, err)
}

res.MimeType = detectMime([]byte(in.Data), true)
return res, nil
}
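
The removed detectMime helper (deleted from api/download.go above and no longer called here) recognized only the PNG magic bytes and returned binary/octet-stream for everything else, which is why SVG attachments were served with the wrong Content-Type. Its replacement, utils.GetMimeString, is not shown in this diff; the following is a rough, hypothetical sketch of signature-based sniffing that also covers SVG (sniffMime is an illustrative name, not the actual utils implementation):

```go
package main

import (
	"bytes"
	"fmt"
)

// sniffMime is a hypothetical content sniffer along the lines of the removed
// detectMime helper, extended with an SVG check. It is not the actual
// utils.GetMimeString implementation.
func sniffMime(buf []byte, detect bool) string {
	if !detect || len(buf) == 0 {
		return "binary/octet-stream"
	}

	// PNG: the same eight magic bytes the old detectMime checked.
	if bytes.HasPrefix(buf, []byte("\x89PNG\r\n\x1a\n")) {
		return "image/png"
	}

	// SVG: XML text containing an <svg element near the start.
	head := bytes.TrimSpace(buf)
	if bytes.HasPrefix(head, []byte("<svg")) ||
		(bytes.HasPrefix(head, []byte("<?xml")) && bytes.Contains(head, []byte("<svg"))) {
		return "image/svg+xml"
	}

	return "binary/octet-stream"
}

func main() {
	fmt.Println(sniffMime([]byte(`<svg xmlns="http://www.w3.org/2000/svg"></svg>`), true))
	fmt.Println(sniffMime([]byte("\x89PNG\r\n\x1a\n...."), true))
	fmt.Println(sniffMime([]byte("plain bytes"), true))
}
```

Go's own net/http.DetectContentType covers many common types but reports SVG as generic XML text rather than image/svg+xml, so a dedicated check like the one sketched above is still needed for this bug.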

21 changes: 21 additions & 0 deletions artifacts/definitions/Linux/Events/Journal.yaml
@@ -0,0 +1,21 @@
name: Linux.Events.Journal
description: |
Watches the binary journal logs. Systemd uses a binary log format to
store logs.
type: CLIENT_EVENT

parameters:
- name: JournalGlob
type: glob
description: A Glob expression for finding journal files.
default: /{run,var}/log/journal/*/*.journal

sources:
- query: |
SELECT * FROM foreach(row={
SELECT OSPath FROM glob(globs=JournalGlob)
}, query={
SELECT *
FROM watch_journald(filename=OSPath)
}, workers=100)
172 changes: 10 additions & 162 deletions artifacts/definitions/Linux/Forensics/Journal.yaml
@@ -1,183 +1,31 @@
name: Linux.Forensics.Journal
description: |
Parses the binary journal logs. Systemd uses a binary log format to
store logs. You can read these logs using journalctl command:
`journalctl --file /run/log/journal/*/*.journal`
This artifact uses the Velociraptor Binary parser to parse the
binary format. The format is documented
https://systemd.io/JOURNAL_FILE_FORMAT/
store logs.
parameters:
- name: JournalGlob
type: glob
description: A Glob expression for finding journal files.
default: /{run,var}/log/journal/*/*.journal

- name: OnlyShowMessage
type: bool
description: If set we only show the message entry (similar to syslog).

- name: AlsoUpload
type: bool
description: If set we also upload the raw files.

export: |
LET JournalProfile = '''[
["Header", "x=>x.header_size", [
["Signature", 0, "String", {
"length": 8,
}],
["compatible_flags", 8, uint32],
["incompatible_flags", 12, Flags, {
type: uint32,
bitmap: {
COMPRESSED_XZ: 0,
COMPRESSED_LZ4: 1,
KEYED_HASH: 2,
COMPRESSED_ZSTD: 3,
COMPACT: 4,
}
}],
["IsCompact", 12, BitField, {
type: uint32,
start_bit: 4,
end_bit: 5,
}],
["header_size", 88, "uint64"],
["arena_size", 96, "uint64"],
["n_objects", 144, uint64],
["n_entries", 152, uint64],
["Objects", "x=>x.header_size", "Array", {
"type": "ObjectHeader",
"count": "x=>x.n_objects",
"max_count": 100000
}]
]],
["ObjectHeader", "x=>x.size", [
["Offset", 0, "Value", {
"value": "x=>x.StartOf",
}],
["type", 0, "Enumeration",{
"type": "uint8",
"choices": {
"0": OBJECT_UNUSED,
"1": OBJECT_DATA,
"2": OBJECT_FIELD,
"3": OBJECT_ENTRY,
"4": OBJECT_DATA_HASH_TABLE,
"5": OBJECT_FIELD_HASH_TABLE,
"6": OBJECT_ENTRY_ARRAY,
"7": OBJECT_TAG,
}
}],
["flags", 1, "uint8"],
["__real_size", 8, "uint64"],
["__round_size", 8, "Value", {
"value": "x=>int(int=x.__real_size / 8) * 8",
}],
["size", 0, "Value", {
"value": "x=>if(condition=x.__real_size = x.__round_size, then=x.__round_size, else=x.__round_size + 8)",
}],
["payload", 16, Union, {
"selector": "x=>x.type",
"choices": {
"OBJECT_DATA": DataObject,
"OBJECT_ENTRY": EntryObject,
}
}]
]],
["DataObject", 0, [
["payload", "x=>DataOffset", String]
]],
# This is basically a single log line -
# it is really a list of references to data Objects
["EntryObject", 0, [
["seqnum", 0, "uint64"],
["realtime", 8, "uint64"],
["monotonic", 16, "uint64"],
["_items", 48, Array, {
"type": EntryItem,
"count": 50,
"sentinel": "x=>NOT x.object",
}],
["_items_compact", 48, Array, {
"type": CompatEntryItem,
"count": 50,
"sentinel": "x=>NOT x.object",
}],
["items", 0, Value, {
value: "x=>if(condition=IsCompact, then=x._items_compact, else=x._items)",
}]
]],
["CompatEntryItem", 4, [
["object", 0, uint32]
]],
["EntryItem", 16, [
["object", 0, "uint64"],
]],
]
'''
-- We make a quick pass over the file to get all the OBJECT_ENTRY
-- objects which are all we care about. By extracting Just the
-- offsets of the OBJECT_ENTRY Objects in the first pass we can
-- free memory we wont need.
LET Offsets(File) = SELECT Offset
FROM foreach(row=parse_binary(filename=File, profile=JournalProfile,
struct="Header").Objects)
WHERE type = "OBJECT_ENTRY"
-- Now parse the ObjectEntry in each offset
LET _ParseFile(File) =
SELECT Offset,
parse_binary(
filename=File, profile=JournalProfile,
struct="ObjectHeader", offset=Offset) AS Parsed
FROM Offsets(File=File)
-- Extract the timestamps and all the attributes
LET ParseFile(File) = SELECT * FROM foreach(row={
-- If the file is compact the payload is shifted by 8 bytes.
SELECT parse_binary(
filename=File,
profile=JournalProfile,
struct="Header").IsCompact * 8 + 48 AS DataOffset,
parse_binary(
filename=File,
profile=JournalProfile,
struct="Header").IsCompact AS IsCompact
FROM scope()
}, query={
SELECT File, Offset,
timestamp(epoch=Parsed.payload.realtime) AS Timestamp,
{
SELECT parse_binary(
filename=File,
profile=JournalProfile,
struct="ObjectHeader",
offset=_value).payload.payload AS Line
FROM foreach(row=Parsed.payload.items.object)
WHERE Line
} AS Data
FROM _ParseFile(File=File)
sources:
- name: Uploads
query: |
SELECT * FROM if(condition=AlsoUpload,
then={
SELECT OSPath, upload(file=OSPath) AS Upload
FROM glob(globs=JournalGlob)
})
sources:
- query: |
SELECT * FROM foreach(row={
SELECT OSPath FROM glob(globs=JournalGlob)
}, query={
SELECT *, if(condition=OnlyShowMessage,
then=filter(list=Data, regex="^MESSAGE=")[0], else=Data) AS Data,
if(condition=AlsoUpload, then=upload(file=File)) AS Upload
FROM ParseFile(File=OSPath)
SELECT *
FROM parse_journald(filename=OSPath)
})
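
The removed JournalProfile above hard-codes the offsets of the systemd journal file header (signature at 0, header_size at 88, arena_size at 96, n_objects at 144, n_entries at 152), following the format documented at https://systemd.io/JOURNAL_FILE_FORMAT/. For reference, a small Go sketch that reads those same header fields directly, assuming the little-endian encoding the format specifies (this is not the parse_journald/watch_journald implementation the commit switches to):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"os"
)

// readJournalHeader reads the fields at the offsets the removed VQL profile
// used: signature (8 bytes at 0), header_size (88), arena_size (96),
// n_objects (144) and n_entries (152), all little-endian uint64.
func readJournalHeader(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	buf := make([]byte, 160)
	if _, err := f.ReadAt(buf, 0); err != nil {
		return err
	}
	if string(buf[:8]) != "LPKSHHRH" {
		return fmt.Errorf("not a journal file: %q", buf[:8])
	}
	fmt.Println("header_size:", binary.LittleEndian.Uint64(buf[88:96]))
	fmt.Println("arena_size: ", binary.LittleEndian.Uint64(buf[96:104]))
	fmt.Println("n_objects:  ", binary.LittleEndian.Uint64(buf[144:152]))
	fmt.Println("n_entries:  ", binary.LittleEndian.Uint64(buf[152:160]))
	return nil
}

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: journalhdr <file.journal>")
		os.Exit(1)
	}
	if err := readJournalHeader(os.Args[1]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```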
8 changes: 2 additions & 6 deletions artifacts/testdata/server/testcases/linux_systemd.in.yaml
@@ -21,16 +21,12 @@ Queries:
- LET _ <= remap(config=format(format=RemappingTemplate, args=srcDir),
copy=["zip", "file"], clear=TRUE)

- SELECT Offset, Timestamp, Data
- SELECT *
FROM Artifact.Linux.Forensics.Journal(
JournalGlob='/system.journal')

- SELECT Offset, Timestamp, Data
FROM Artifact.Linux.Forensics.Journal(OnlyShowMessage=TRUE,
JournalGlob='/system.journal')

# Check that we also support compact style files
- SELECT Offset, Timestamp, Data
- SELECT *
FROM Artifact.Linux.Forensics.Journal(
JournalGlob='/compact.journal')
LIMIT 10