From fc27fb94b477bca910ef032027d726c8a015d078 Mon Sep 17 00:00:00 2001 From: idelcano Date: Mon, 9 Mar 2026 08:28:29 +0100 Subject: [PATCH 01/16] added bom generation scripts --- scripts/normalize_sarif.py | 709 ++++++++++++++++++ ...port_open_code_scanning_alert_instances.sh | 250 ++++++ scripts/upload_bom_and_fetch_metrics.sh | 171 +++++ 3 files changed, 1130 insertions(+) create mode 100755 scripts/normalize_sarif.py create mode 100644 scripts/report_open_code_scanning_alert_instances.sh create mode 100644 scripts/upload_bom_and_fetch_metrics.sh diff --git a/scripts/normalize_sarif.py b/scripts/normalize_sarif.py new file mode 100755 index 0000000..d184911 --- /dev/null +++ b/scripts/normalize_sarif.py @@ -0,0 +1,709 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import json +import re +import sys +from typing import Any, Dict, List, Optional, Tuple + +DEFAULT_TOOL_PREFIX = "OWASP Dependency-Track" +DEFAULT_FALLBACK_URI = "package.json" +DEFAULT_FALLBACK_LINE = 2 +# GitHub truncates long fullDescription content in the UI, so we keep both: +# - result header summary +# - rule.fullDescription +# to the same short preview length. 
DEFAULT_SUMMARY_MAX_LEN = 180
# Logical base for all artifact URIs emitted into SARIF locations.
URI_BASE_ID = "%SRCROOT%"


def has_text(value: Any) -> bool:
    """Return True when value is neither None nor an empty string."""
    return value is not None and str(value) != ""


def load_json(path: str) -> Dict[str, Any]:
    """Load a UTF-8 JSON file and require a top-level object.

    Raises ValueError when the document root is not a JSON object.
    """
    with open(path, "r", encoding="utf-8") as file_obj:
        data = json.load(file_obj)
    if not isinstance(data, dict):
        raise ValueError(f"Top-level JSON object expected: {path}")
    return data


def save_json(path: str, payload: Dict[str, Any]) -> None:
    """Write payload as pretty-printed UTF-8 JSON with a trailing newline."""
    # NOTE(review): not called by main(), which writes with args.pretty_indent
    # directly — presumably kept for external callers; confirm before removing.
    with open(path, "w", encoding="utf-8") as file_obj:
        json.dump(payload, file_obj, ensure_ascii=False, indent=2)
        file_obj.write("\n")


def to_number(value: Any) -> Optional[float]:
    """Coerce ints, floats, and plain decimal strings to float; else None."""
    if value is None:
        return None
    if isinstance(value, (int, float)):
        return float(value)
    if isinstance(value, str) and re.match(r"^[0-9]+(\.[0-9]+)?$", value.strip()):
        return float(value.strip())
    return None


def score_from_result(result: Dict[str, Any]) -> Optional[float]:
    """Derive a CVSS-like score for one SARIF result.

    Prefers an explicit cvssV3BaseScore property; otherwise maps the
    severityRank property (0..3) onto representative scores. Returns None
    when neither property yields a usable number.
    """
    properties = result.get("properties")
    if not isinstance(properties, dict):
        properties = {}

    cvss_value = to_number(properties.get("cvssV3BaseScore"))
    if cvss_value is not None:
        return cvss_value

    rank = to_number(properties.get("severityRank"))
    if rank is None:
        return None

    # rank 0..3 -> representative CVSS scores (critical/high/medium/low).
    mapped = {0.0: 9.0, 1.0: 8.0, 2.0: 5.0, 3.0: 2.0}
    return mapped.get(rank)


def map_level(score: Optional[float], current: Any) -> str:
    """Map a numeric score to a SARIF level (error/warning/note).

    Falls back to the pre-existing level (or "warning") when no score exists.
    """
    if score is None:
        return str(current) if has_text(current) else "warning"
    if score >= 7.0:
        return "error"
    if score >= 4.0:
        return "warning"
    return "note"


def severity_from_result(result: Dict[str, Any], score: Optional[float]) -> str:
    """Resolve a lowercase severity label for one SARIF result.

    Resolution order: explicit properties.severity, then severityRank,
    then the SARIF level, then numeric score thresholds.
    """
    properties = result.get("properties")
    if not isinstance(properties, dict):
        properties = {}

    explicit = properties.get("severity")
    if has_text(explicit):
        return str(explicit).strip().lower()

    rank = to_number(properties.get("severityRank"))
    if rank is not None:
        mapped = {0.0: "critical", 1.0: "high", 2.0: "medium", 3.0: "low"}
        if rank in mapped:
            return mapped[rank]

    if score is None:
        # No numeric signal: approximate from the SARIF level.
        level = str(result.get("level") or "").strip().lower()
        if level == "error":
            return "high"
        if level == "warning":
            return "medium"
        if level == "note":
            return "low"
        return "unknown"

    if score >= 9.0:
        return "critical"
    if score >= 7.0:
        return "high"
    if score >= 4.0:
        return "medium"
    if score > 0.0:
        return "low"
    return "unknown"


def normalize_path(path_value: Any) -> Optional[str]:
    """Normalize a path to forward slashes with no leading slash, or None."""
    if not has_text(path_value):
        return None
    normalized = str(path_value).replace("\\", "/").lstrip("/")
    return normalized or None


def extract_component_paths(component: Dict[str, Any]) -> List[str]:
    """Collect de-duplicated source paths for one CycloneDX component.

    Sources, in order: syft:location:<n>:path properties, then
    evidence.occurrences[].location entries.
    """
    paths: List[str] = []

    properties = component.get("properties")
    if isinstance(properties, list):
        for prop in properties:
            if not isinstance(prop, dict):
                continue
            name = prop.get("name")
            if not isinstance(name, str):
                continue
            if not (name.startswith("syft:location:") and name.endswith(":path")):
                continue
            normalized = normalize_path(prop.get("value"))
            if normalized and normalized not in paths:
                paths.append(normalized)

    evidence = component.get("evidence")
    if isinstance(evidence, dict):
        occurrences = evidence.get("occurrences")
        if isinstance(occurrences, list):
            for occ in occurrences:
                if not isinstance(occ, dict):
                    continue
                normalized = normalize_path(occ.get("location"))
                if normalized and normalized not in paths:
                    paths.append(normalized)

    return paths


def build_purl_to_path_map(vdr_doc: Dict[str, Any]) -> Dict[str, str]:
    """Map component purl AND bom-ref to the component's first known path."""
    mapping: Dict[str, str] = {}
    components = vdr_doc.get("components")
    if not isinstance(components, list):
        return mapping

    for component in components:
        if not isinstance(component, dict):
            continue
        paths = extract_component_paths(component)
        if not paths:
            continue
        first_path = paths[0]
        # Register the same path under both identifiers so lookups by either
        # purl or bom-ref succeed.
        for key_name in ("purl", "bom-ref"):
            key_value = component.get(key_name)
            if has_text(key_value):
                mapping[str(key_value)] = first_path
    return mapping


def build_vulnerability_url_map(doc: Dict[str, Any]) -> Dict[str, str]:
    """Map vulnerability id -> advisory URL from a CycloneDX document.

    Prefers the source URL; falls back to the first reference URL.
    """
    mapping: Dict[str, str] = {}
    vulnerabilities = doc.get("vulnerabilities")
    if not isinstance(vulnerabilities, list):
        return mapping

    for vuln in vulnerabilities:
        if not isinstance(vuln, dict):
            continue
        vuln_id = vuln.get("id")
        if not has_text(vuln_id):
            continue

        selected_url: Optional[str] = None
        source = vuln.get("source")
        if isinstance(source, dict) and has_text(source.get("url")):
            selected_url = str(source.get("url"))

        if not selected_url:
            references = vuln.get("references")
            if isinstance(references, list):
                for ref in references:
                    if isinstance(ref, dict) and has_text(ref.get("url")):
                        selected_url = str(ref.get("url"))
                        break

        if selected_url:
            mapping[str(vuln_id)] = selected_url

    return mapping


def merge_url_maps(primary: Dict[str, str], secondary: Dict[str, str]) -> Dict[str, str]:
    """Merge two URL maps; primary entries win, secondary fills gaps."""
    merged = dict(primary)
    for key, value in secondary.items():
        if not has_text(merged.get(key)):
            merged[key] = value
    return merged


def advisory_url(rule_id: str, url_map: Dict[str, str], existing: Optional[str]) -> Optional[str]:
    """Pick an advisory URL: keep an existing one, else look up by rule id."""
    if has_text(existing):
        return str(existing)
    mapped = url_map.get(rule_id)
    if has_text(mapped):
        return str(mapped)
    return None


def clean_text(text: str) -> str:
    """Flatten advisory markdown into plain text.

    Strips carriage returns, converts markdown links to "text (url)",
    removes heading markers and backticks, and drops empty lines as well as
    bare section-title lines (Summary/Details/Impact/PoC).
    """
    value = str(text or "")
    value = value.replace("\r", "")
    # Convert markdown links to plain text.
    value = re.sub(r"\[([^\]]+)\]\(([^)]+)\)", r"\1 (\2)", value)
    ignored_lines = {"Summary", "Details", "Impact", "PoC"}

    lines: List[str] = []
    for raw_line in value.split("\n"):
        line = re.sub(r"^#+\s*", "", raw_line)
        line = line.replace("`", "").strip()
        if line:
            if line in ignored_lines:
                continue
            lines.append(line)
    return "\n".join(lines)


def clean_summary(text: str) -> str:
    """Return the first meaningful cleaned line of text, or ""."""
    ignored_lines = {"Summary", "Details", "Impact", "PoC"}
    for line in clean_text(text).split("\n"):
        line = line.strip()
        if not line:
            continue
        if line in ignored_lines:
            continue
        return line
    return ""


def truncate_summary(text: str, max_len: int) -> str:
    """Truncate text to max_len characters, appending "..." when cut."""
    if len(text) <= max_len:
        return text
    return text[:max_len] + "..."


def get_first_logical_fqn(result: Dict[str, Any]) -> str:
    """Return the first location's logical fullyQualifiedName, or ""."""
    locations = result.get("locations")
    if not isinstance(locations, list) or not locations:
        return ""
    first = locations[0]
    if not isinstance(first, dict):
        return ""
    logical_locations = first.get("logicalLocations")
    if not isinstance(logical_locations, list) or not logical_locations:
        return ""
    logical = logical_locations[0]
    if not isinstance(logical, dict):
        return ""
    value = logical.get("fullyQualifiedName")
    return str(value) if has_text(value) else ""


def ensure_locations(
    result: Dict[str, Any],
    mapped_path: Optional[str],
    logical_key: str,
    package_name: str,
    fallback_uri: str,
    fallback_line: int,
) -> None:
    """Guarantee every result has a physical location GitHub can render.

    When mapped_path is known it is used with line 1 (or an existing positive
    startLine); otherwise the configured fallback URI and line are applied.
    Creates the locations array from scratch when absent, and repairs each
    existing entry in place otherwise.
    """
    uri = mapped_path if has_text(mapped_path) else fallback_uri
    start_line = 1 if has_text(mapped_path) else fallback_line

    locations = result.get("locations")
    if not isinstance(locations, list) or not locations:
        result["locations"] = [
            {
                "logicalLocations": [
                    {"fullyQualifiedName": logical_key or package_name or "dependency-track"}
                ],
                "physicalLocation": {
                    "artifactLocation": {"uri": uri, "uriBaseId": URI_BASE_ID},
                    "region": {"startLine": start_line},
                },
            }
        ]
        return

    for index, loc in enumerate(locations):
        if not isinstance(loc, dict):
            loc = {}
            locations[index] = loc

        physical = loc.get("physicalLocation")
        if not isinstance(physical, dict):
            physical = {}
        loc["physicalLocation"] = physical

        artifact = physical.get("artifactLocation")
        if not isinstance(artifact, dict):
            artifact = {}
        physical["artifactLocation"] = artifact
        artifact["uri"] = uri
        artifact["uriBaseId"] = URI_BASE_ID

        region = physical.get("region")
        if not isinstance(region, dict):
            region = {}
        physical["region"] = region

        if has_text(mapped_path):
            # Keep an existing positive start line; otherwise default to 1.
            existing_line = region.get("startLine")
            if isinstance(existing_line, int) and existing_line > 0:
                region["startLine"] = existing_line
            else:
                region["startLine"] = 1
        else:
            region["startLine"] = fallback_line


def set_rule_text(rule: Dict[str, Any], key: str, value: str) -> None:
    """Set rule[key].text, creating the message object when needed."""
    node = rule.get(key)
    if not isinstance(node, dict):
        node = {}
    rule[key] = node
    node["text"] = value


def format_security_severity(value: float) -> str:
    """Format a score rounded to one decimal, as GitHub security-severity."""
    rounded = round(value * 10) / 10
    return str(rounded)


def build_long_message(
    source: str,
    original_rule_id: str,
    package_name: str,
    package_version: str,
    severity: str,
    component_path: str,
    summary_line: str,
    detail_text: str,
    help_url: str,
) -> str:
    """Compose the multi-line result.message.text body for one finding."""
    affected_component = (
        f"{package_name}@{package_version}" if has_text(package_version) else package_name
    )
    header = f"[{source}] {original_rule_id} in {affected_component}"
    if has_text(summary_line):
        header += f": {summary_line}"

    lines = [
        header,
        f"severity: {severity}",
        f"affected_components: {affected_component}",
        f"affected_component_paths: {component_path}",
        f"affected_names: {package_name}",
        f"affected_versions: {package_version}",
        f"id: {original_rule_id}",
        "description:",
        detail_text if has_text(detail_text) else "",
    ]
    if has_text(help_url):
        lines.append(f"reference_url: {help_url}")
    return "\n".join(lines)


def normalize_sarif(
    sarif: Dict[str, Any],
    vdr_doc: Dict[str, Any],
    vex_doc: Dict[str, Any],
    source: str,
    tool_name: str,
    rule_id_namespace: str,
    location_mode: str,
    fallback_uri: str,
    fallback_line: int,
    summary_max_len: int,
) -> Tuple[int, int]:
    """Rewrite a Dependency-Track SARIF in place for GitHub Code Scanning.

    Per run: renames the tool driver, namespaces rule/result ids, enriches
    rule descriptions and helpUri from VDR/VEX advisory URLs, rebuilds each
    result's message/level/fingerprint/locations, and back-fills per-rule
    security-severity properties from the highest observed result score.

    Returns (rules updated, results updated). Raises ValueError when the
    SARIF 'runs' entry is not a list. Mutates `sarif` in place.
    """
    runs = sarif.get("runs")
    if not isinstance(runs, list):
        raise ValueError("Invalid SARIF: 'runs' must be a list")

    purl_path = build_purl_to_path_map(vdr_doc)
    vdr_urls = build_vulnerability_url_map(vdr_doc)
    vex_urls = build_vulnerability_url_map(vex_doc)
    url_by_rule = merge_url_maps(vdr_urls, vex_urls)

    updated_rules = 0
    updated_results = 0

    for run in runs:
        if not isinstance(run, dict):
            continue

        # Repair tool/driver containers defensively before renaming.
        tool = run.setdefault("tool", {})
        if not isinstance(tool, dict):
            tool = {}
            run["tool"] = tool
        driver = tool.setdefault("driver", {})
        if not isinstance(driver, dict):
            driver = {}
            tool["driver"] = driver

        driver["name"] = tool_name
        rules = driver.get("rules")
        if not isinstance(rules, list):
            rules = []
            driver["rules"] = rules

        for rule in rules:
            if not isinstance(rule, dict):
                continue
            original_rule_id = str(rule.get("id") or "Dependency-Track finding")
            namespaced_rule_id = f"{rule_id_namespace}{original_rule_id}"
            help_url = advisory_url(original_rule_id, url_by_rule, rule.get("helpUri"))

            full_desc = rule.get("fullDescription")
            full_desc_text = (
                full_desc.get("text")
                if isinstance(full_desc, dict) and has_text(full_desc.get("text"))
                else None
            )
            base_short = None
            short_desc = rule.get("shortDescription")
            if isinstance(short_desc, dict):
                base_short = short_desc.get("text")
            if not has_text(base_short):
                base_short = rule.get("id") or "Dependency-Track finding"
            rule_detail = clean_text(
                str(full_desc_text or base_short or "Dependency-Track finding")
            )

            rule["id"] = namespaced_rule_id
            set_rule_text(rule, "shortDescription", f"[{source}] {str(base_short)}")
            full_text = f"[{source}] {original_rule_id}"
            if has_text(rule_detail):
                rule_summary = clean_summary(rule_detail)
                if not has_text(rule_summary):
                    rule_summary = rule_detail
                # Keep fullDescription short; GitHub truncates long text.
                full_text += f"\n\n{truncate_summary(rule_summary, summary_max_len)}"
            if help_url:
                full_text += f"\n\nReference: {help_url}"
            set_rule_text(rule, "fullDescription", full_text)
            if help_url:
                rule["helpUri"] = help_url
            updated_rules += 1

        results = run.get("results")
        if not isinstance(results, list):
            continue

        for result in results:
            if not isinstance(result, dict):
                continue

            properties = result.get("properties")
            if not isinstance(properties, dict):
                properties = {}

            original_rule_id = str(result.get("ruleId") or "Dependency-Track finding")
            namespaced_rule_id = f"{rule_id_namespace}{original_rule_id}"
            help_url = advisory_url(original_rule_id, url_by_rule, None)
            package_name = str(properties.get("name") or "unknown-package")
            package_version = str(properties.get("version") or "")

            message = result.get("message")
            if not isinstance(message, dict):
                message = {}
                result["message"] = message
            raw_detail = str(message.get("text") or "")
            detail_text = clean_text(raw_detail)
            summary_line = truncate_summary(clean_summary(raw_detail), summary_max_len)

            score = score_from_result(result)
            severity = severity_from_result(result, score)
            logical_key = get_first_logical_fqn(result)
            # The logical FQN is expected to be the component purl/bom-ref,
            # which keys the VDR-derived path map.
            component_path = purl_path.get(logical_key) if logical_key else None
            mapped_path = component_path
            if location_mode == "fallback":
                mapped_path = None

            # Copy before mutating so shared property dicts are not aliased.
            properties = dict(properties)
            properties["scan_source"] = source
            properties["original_rule_id"] = original_rule_id
            if help_url:
                properties["advisory_url"] = help_url
            if raw_detail:
                properties["advisory_detail"] = raw_detail
            result["properties"] = properties
            result["ruleId"] = namespaced_rule_id

            message["text"] = build_long_message(
                source=source,
                original_rule_id=original_rule_id,
                package_name=package_name,
                package_version=package_version,
                severity=severity,
                component_path=str(component_path or ""),
                summary_line=summary_line,
                detail_text=detail_text,
                help_url=str(help_url or ""),
            )

            # Stable fingerprint so GitHub deduplicates alerts across uploads.
            partial = result.get("partialFingerprints")
            if not isinstance(partial, dict):
                partial = {}

            finding_key = "|".join(
                [
                    source,
                    str(result.get("ruleId") or ""),
                    str(properties.get("name") or ""),
                    str(properties.get("group") or ""),
                    str(properties.get("version") or ""),
                    logical_key,
                ]
            )
            partial["dtrack_finding_key"] = finding_key
            result["partialFingerprints"] = partial

            result["level"] = map_level(score, result.get("level"))

            ensure_locations(
                result=result,
                mapped_path=mapped_path,
                logical_key=logical_key,
                package_name=package_name,
                fallback_uri=fallback_uri,
                fallback_line=fallback_line,
            )
            updated_results += 1

        # Propagate the highest per-rule score back onto the rule metadata
        # (results carry namespaced ruleIds at this point, matching rule ids).
        score_by_rule: Dict[str, float] = {}
        for result in results:
            if not isinstance(result, dict):
                continue
            rule_id = str(result.get("ruleId") or "")
            if not rule_id:
                continue
            score = score_from_result(result)
            if score is None:
                continue
            if rule_id not in score_by_rule or score > score_by_rule[rule_id]:
                score_by_rule[rule_id] = score

        for rule in rules:
            if not isinstance(rule, dict):
                continue
            rule_id = str(rule.get("id") or "")
            if not rule_id or rule_id not in score_by_rule:
                continue
            rule_properties = rule.get("properties")
            if not isinstance(rule_properties, dict):
                rule_properties = {}
            rule_properties["security-severity"] = format_security_severity(
                score_by_rule[rule_id]
            )
            if not has_text(rule_properties.get("precision")):
                rule_properties["precision"] = "high"
            tags = rule_properties.get("tags")
            if not isinstance(tags, list):
                tags = []
            if "security" not in tags:
                tags.append("security")
            rule_properties["tags"] = tags
            rule["properties"] = rule_properties

    return updated_rules, updated_results


def parse_args() -> argparse.Namespace:
    """Define and parse the command-line interface."""
    parser = argparse.ArgumentParser(
        description="Normalize Dependency-Track SARIF for GitHub Code Scanning."
    )
    parser.add_argument("--input-sarif", required=True, help="Input SARIF file path.")
    parser.add_argument(
        "--output-sarif", required=True, help="Output SARIF file path."
    )
    # NOTE(review): --vdr/--vex are described as optional, but main() rejects
    # runs where neither is given — confirm intended contract.
    parser.add_argument(
        "--vdr",
        default="",
        help="CycloneDX VDR/BOM JSON file path. Optional but recommended for path mapping.",
    )
    parser.add_argument(
        "--vex",
        default="",
        help="CycloneDX VEX JSON file path. Optional but recommended for references.",
    )
    parser.add_argument(
        "--source",
        required=True,
        help="Source label added to output, for example syft or yarn4.",
    )
    parser.add_argument(
        "--tool-name",
        default="",
        help="Override tool.driver.name. Default: OWASP Dependency-Track (<source>).",
    )
    parser.add_argument(
        "--rule-id-namespace",
        default="",
        help="Prefix added to rule IDs in both rules and results, for example syft-test::",
    )
    parser.add_argument(
        "--location-mode",
        choices=["auto", "fallback"],
        default="auto",
        help=(
            "Location strategy: 'auto' uses mapped component paths when available; "
            "'fallback' always uses fallback-uri/line."
        ),
    )
    parser.add_argument(
        "--fallback-uri",
        default=DEFAULT_FALLBACK_URI,
        help=f"Fallback artifact path when no component path is found. Default: {DEFAULT_FALLBACK_URI}.",
    )
    parser.add_argument(
        "--fallback-line",
        type=int,
        default=DEFAULT_FALLBACK_LINE,
        help=f"Fallback start line for fallback-uri. Default: {DEFAULT_FALLBACK_LINE}.",
    )
    parser.add_argument(
        "--summary-max-len",
        dest="summary_max_len",
        type=int,
        default=DEFAULT_SUMMARY_MAX_LEN,
        help=(
            "Maximum summary length used in result.message header and "
            f"rule.fullDescription.text. Default: {DEFAULT_SUMMARY_MAX_LEN}."
        ),
    )
    parser.add_argument(
        "--pretty-indent",
        type=int,
        default=2,
        help="JSON indentation for output SARIF. Default: 2.",
    )
    return parser.parse_args()


def main() -> int:
    """CLI entry point: load inputs, normalize, write output, print a report.

    Returns 0 on success, 1 on any error (message goes to stderr).
    """
    args = parse_args()
    tool_name = (
        args.tool_name.strip()
        if has_text(args.tool_name)
        else f"{DEFAULT_TOOL_PREFIX} ({args.source})"
    )

    try:
        sarif = load_json(args.input_sarif)
        if not has_text(args.vdr) and not has_text(args.vex):
            raise ValueError("At least one input is required: --vdr and/or --vex")

        vdr_doc = load_json(args.vdr) if has_text(args.vdr) else {}
        vex_doc = load_json(args.vex) if has_text(args.vex) else {}

        updated_rules, updated_results = normalize_sarif(
            sarif=sarif,
            vdr_doc=vdr_doc,
            vex_doc=vex_doc,
            source=args.source,
            tool_name=tool_name,
            rule_id_namespace=args.rule_id_namespace,
            location_mode=args.location_mode,
            fallback_uri=args.fallback_uri,
            fallback_line=args.fallback_line,
            summary_max_len=args.summary_max_len,
        )

        with open(args.output_sarif, "w", encoding="utf-8") as file_obj:
            json.dump(sarif, file_obj, ensure_ascii=False, indent=args.pretty_indent)
            file_obj.write("\n")

        runs = sarif.get("runs")
        run_count = len(runs) if isinstance(runs, list) else 0
        print(f"SARIF normalized successfully: {args.output_sarif}")
        print(f"Input SARIF: {args.input_sarif}")
        print(f"VDR: {args.vdr if has_text(args.vdr) else ''}")
        print(f"VEX: {args.vex if has_text(args.vex) else ''}")
        print(f"Source label: {args.source}")
        print(f"Tool name: {tool_name}")
        print(
            f"Rule ID namespace: {args.rule_id_namespace if has_text(args.rule_id_namespace) else ''}"
        )
        print(f"Location mode: {args.location_mode}")
        print(f"Summary max len: {args.summary_max_len}")
        print(f"Runs processed: {run_count}")
        print(f"Rules updated: {updated_rules}")
        print(f"Results updated: {updated_results}")
        return 0
    except Exception as exc:
        print(f"Error: {exc}", file=sys.stderr)
        return 1


if __name__ == "__main__":
    raise SystemExit(main())
diff --git a/scripts/report_open_code_scanning_alert_instances.sh
b/scripts/report_open_code_scanning_alert_instances.sh
new file mode 100644
index 0000000..9a6d763
--- /dev/null
+++ b/scripts/report_open_code_scanning_alert_instances.sh
@@ -0,0 +1,250 @@
#!/usr/bin/env bash
# Report open GitHub code-scanning alert instances for a head ref, optionally
# compared against a base ref, and emit counts to GITHUB_OUTPUT.
set -euo pipefail

usage() {
  cat <<'EOF'
Usage:
  report_open_code_scanning_alert_instances.sh [options]

Options:
  --repo <owner/name>      Default: $GITHUB_REPOSITORY
  --tool-name <name>       Default: $TOOL_NAME
  --head-ref <ref>         Head ref to inspect (required unless --ref or $REF is set)
  --base-ref <ref>         Optional base ref for comparison
  --ref <ref>              Backward-compatible alias for --head-ref
  --token <token>          Default: $GH_TOKEN or $GITHUB_TOKEN
  --api-url <url>          Default: $GITHUB_API_URL or https://api.github.com
  --output-prefix <name>   Default: open_alerts
  -h, --help               Show this help
EOF
}

# Expand a bare branch name to a fully-qualified refs/heads/... ref;
# refs already qualified (refs/*) pass through unchanged.
normalize_ref() {
  local value="$1"
  if [[ "$value" == refs/* ]]; then
    echo "$value"
  else
    echo "refs/heads/${value#refs/heads/}"
  fi
}

# Read alert JSON objects on stdin and print one normalized severity per
# alert (error->high, warning/moderate->medium, note->low).
normalize_severity_stream() {
  jq -r '
    (.rule.security_severity_level // .rule.severity // "unknown")
    | ascii_downcase
    | if . == "error" then "high"
      elif . == "warning" or . == "moderate" then "medium"
      elif . == "note" then "low"
      else .
      end
  '
}

# Count alerts of one normalized severity in a JSONL file.
count_severity() {
  local file="$1"
  local severity="$2"
  awk -v target="$severity" '$0 == target { c++ } END { print c + 0 }' < <(
    normalize_severity_stream <"$file"
  )
}

# Page through the code-scanning alerts API (state=open, given tool and ref)
# and append one JSON object per line to the output file.
fetch_open_alerts_jsonl() {
  local ref="$1"
  local out_file="$2"
  local page=1
  : > "$out_file"

  while :; do
    local batch
    batch="$(
      curl -sSfL --get \
        -H "Authorization: Bearer $token" \
        -H "Accept: application/vnd.github+json" \
        --data-urlencode "state=open" \
        --data-urlencode "tool_name=$tool_name" \
        --data-urlencode "ref=$ref" \
        --data-urlencode "per_page=100" \
        --data-urlencode "page=$page" \
        "$api_url/repos/$repo/code-scanning/alerts"
    )"

    local count
    count="$(jq 'length' <<<"$batch")"
    [ "$count" -eq 0 ] && break

    jq -c '.[]' <<<"$batch" >> "$out_file"
    page=$((page + 1))
  done
}

# Defaults come from the environment; flags below override them.
repo="${GITHUB_REPOSITORY:-}"
tool_name="${TOOL_NAME:-}"
head_ref="${REF:-}"
base_ref="${BASE_REF:-}"
token="${GH_TOKEN:-${GITHUB_TOKEN:-}}"
api_url="${GITHUB_API_URL:-https://api.github.com}"
output_prefix="open_alerts"

while [ "$#" -gt 0 ]; do
  case "$1" in
    --repo) repo="$2"; shift 2 ;;
    --tool-name) tool_name="$2"; shift 2 ;;
    --head-ref) head_ref="$2"; shift 2 ;;
    --base-ref) base_ref="$2"; shift 2 ;;
    --ref) head_ref="$2"; shift 2 ;;
    --token) token="$2"; shift 2 ;;
    --api-url) api_url="$2"; shift 2 ;;
    --output-prefix) output_prefix="$2"; shift 2 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "Unknown argument: $1" >&2; usage >&2; exit 2 ;;
  esac
done

if [ -z "$repo" ]; then
  echo "Missing --repo (or GITHUB_REPOSITORY)." >&2
  exit 1
fi
if [ -z "$tool_name" ]; then
  echo "Missing --tool-name (or TOOL_NAME)." >&2
  exit 1
fi
if [ -z "$head_ref" ]; then
  echo "Missing --head-ref (or --ref/REF)." >&2
  exit 1
fi
if [ -z "$token" ]; then
  echo "Missing --token (or GH_TOKEN/GITHUB_TOKEN)." >&2
  exit 1
fi

head_ref="$(normalize_ref "$head_ref")"
if [ -n "$base_ref" ]; then
  base_ref="$(normalize_ref "$base_ref")"
fi

head_tmp="$(mktemp)"
base_tmp="$(mktemp)"
trap 'rm -f "$head_tmp" "$base_tmp"' EXIT

fetch_open_alerts_jsonl "$head_ref" "$head_tmp"
if [ -n "$base_ref" ]; then
  fetch_open_alerts_jsonl "$base_ref" "$base_tmp"
else
  : > "$base_tmp"
fi

# Head-side totals.
head_instances="$(wc -l < "$head_tmp" | tr -d ' ')"
head_unique_rule_ids="$(jq -r '.rule.id // empty' "$head_tmp" | sed '/^$/d' | sort -u | wc -l | tr -d ' ')"
head_critical="$(count_severity "$head_tmp" "critical")"
head_high="$(count_severity "$head_tmp" "high")"
head_medium="$(count_severity "$head_tmp" "medium")"
head_low="$(count_severity "$head_tmp" "low")"
head_unknown="$(count_severity "$head_tmp" "unknown")"

base_instances=0
base_unique_rule_ids=0
base_critical=0
base_high=0
base_medium=0
base_low=0
base_unknown=0
introduced_instances=0
introduced_critical=0
introduced_high=0
introduced_medium=0
introduced_low=0
introduced_unknown=0
baseline_missing="false"

if [ -n "$base_ref" ]; then
  base_instances="$(wc -l < "$base_tmp" | tr -d ' ')"
  base_unique_rule_ids="$(jq -r '.rule.id // empty' "$base_tmp" | sed '/^$/d' | sort -u | wc -l | tr -d ' ')"
  base_critical="$(count_severity "$base_tmp" "critical")"
  base_high="$(count_severity "$base_tmp" "high")"
  base_medium="$(count_severity "$base_tmp" "medium")"
  base_low="$(count_severity "$base_tmp" "low")"
  base_unknown="$(count_severity "$base_tmp" "unknown")"

  if [ "$base_instances" -eq 0 ]; then
    baseline_missing="true"
  fi

  # Alerts whose number is absent from the base ref count as "introduced".
  introduced_json="$(
    jq -s --slurpfile base "$base_tmp" '
      def norm:
        ascii_downcase
        | if . == "error" then "high"
          elif . == "warning" or . == "moderate" then "medium"
          elif . == "note" then "low"
          else .
          end;
      ($base | map(.number) | unique) as $base_numbers
      | [ .[] | select((.number as $n | $base_numbers | index($n) | not)) ] as $introduced
      | {
          total: ($introduced | length),
          critical: ($introduced | map((.rule.security_severity_level // .rule.severity // "unknown" | norm)) | map(select(. == "critical")) | length),
          high: ($introduced | map((.rule.security_severity_level // .rule.severity // "unknown" | norm)) | map(select(. == "high")) | length),
          medium: ($introduced | map((.rule.security_severity_level // .rule.severity // "unknown" | norm)) | map(select(. == "medium")) | length),
          low: ($introduced | map((.rule.security_severity_level // .rule.severity // "unknown" | norm)) | map(select(. == "low")) | length),
          unknown: ($introduced | map((.rule.security_severity_level // .rule.severity // "unknown" | norm)) | map(select(. == "unknown")) | length)
        }
    ' "$head_tmp"
  )"

  introduced_instances="$(jq -r '.total' <<<"$introduced_json")"
  introduced_critical="$(jq -r '.critical' <<<"$introduced_json")"
  introduced_high="$(jq -r '.high' <<<"$introduced_json")"
  introduced_medium="$(jq -r '.medium' <<<"$introduced_json")"
  introduced_low="$(jq -r '.low' <<<"$introduced_json")"
  introduced_unknown="$(jq -r '.unknown' <<<"$introduced_json")"
fi

echo "Repository: $repo"
echo "Tool name: $tool_name"
echo "Head ref: $head_ref"
if [ -n "$base_ref" ]; then
  echo "Base ref: $base_ref"
fi
echo "Open alert instances (UI) in head: $head_instances"
echo "Unique rule.id in head: $head_unique_rule_ids"
echo "Open instances by severity in head (critical/high/medium/low/unknown): $head_critical/$head_high/$head_medium/$head_low/$head_unknown"

if [ -n "$base_ref" ]; then
  echo "Open alert instances (UI) in base: $base_instances"
  echo "Unique rule.id in base: $base_unique_rule_ids"
  echo "Open instances by severity in base (critical/high/medium/low/unknown): $base_critical/$base_high/$base_medium/$base_low/$base_unknown"
  echo "New alert instances vs base (head - base): $introduced_instances"
  echo "New instances by severity (critical/high/medium/low/unknown): $introduced_critical/$introduced_high/$introduced_medium/$introduced_low/$introduced_unknown"
  echo "baseline_missing: $baseline_missing"
fi

# Publish counts for downstream workflow steps.
if [ -n "${GITHUB_OUTPUT:-}" ]; then
  {
    echo "${output_prefix}_head_ref=$head_ref"
    echo "${output_prefix}_head_instances_count=$head_instances"
    echo "${output_prefix}_head_unique_rule_ids_count=$head_unique_rule_ids"
    echo "${output_prefix}_head_critical_count=$head_critical"
    echo "${output_prefix}_head_high_count=$head_high"
    echo "${output_prefix}_head_medium_count=$head_medium"
    echo "${output_prefix}_head_low_count=$head_low"
    echo "${output_prefix}_head_unknown_count=$head_unknown"
    if [ -n "$base_ref" ]; then
      echo "${output_prefix}_base_ref=$base_ref"
      echo "${output_prefix}_base_instances_count=$base_instances"
      echo "${output_prefix}_base_unique_rule_ids_count=$base_unique_rule_ids"
      echo "${output_prefix}_base_critical_count=$base_critical"
      echo "${output_prefix}_base_high_count=$base_high"
      echo "${output_prefix}_base_medium_count=$base_medium"
      echo "${output_prefix}_base_low_count=$base_low"
      echo "${output_prefix}_base_unknown_count=$base_unknown"
      echo "${output_prefix}_introduced_instances_count=$introduced_instances"
      echo "${output_prefix}_introduced_instances_critical_count=$introduced_critical"
      echo "${output_prefix}_introduced_instances_high_count=$introduced_high"
      echo "${output_prefix}_introduced_instances_medium_count=$introduced_medium"
      echo "${output_prefix}_introduced_instances_low_count=$introduced_low"
      echo "${output_prefix}_introduced_instances_unknown_count=$introduced_unknown"
      echo "${output_prefix}_baseline_missing=$baseline_missing"
    fi
  } >> "$GITHUB_OUTPUT"
fi

diff --git a/scripts/upload_bom_and_fetch_metrics.sh b/scripts/upload_bom_and_fetch_metrics.sh
new file mode 100644
index 0000000..0aa82e8
--- /dev/null
+++ b/scripts/upload_bom_and_fetch_metrics.sh
@@ -0,0 +1,171 @@
#!/usr/bin/env bash
# Upload a CycloneDX BOM to Dependency-Track, wait for processing, fetch
# project metrics, trigger vulnerability analysis, and publish gate values.
set -euo pipefail

usage() {
  cat <<'EOF'
Usage:
  upload_bom_and_fetch_metrics.sh --bom-file <file> --project-suffix <suffix> [options]

Required:
  --bom-file <file>          BOM JSON file (e.g. bom_syft.json)
  --project-suffix <suffix>  Project suffix (e.g. syft, berry)

Optional:
  --repo <owner/repo>        Defaults to GITHUB_REPOSITORY
  --pr-number <n>            Defaults to PR_NUMBER
  --ref-name <name>          Defaults to GITHUB_REF_NAME or main
  --dtrack-url <url>         Defaults to DTRACK_URL
  --dtrack-api-key <key>     Defaults to DTRACK_API_KEY
  --wait-seconds <n>         Defaults to 5
  --max-wait-attempts <n>    Defaults to 120

Behavior:
  1) Upload BOM and wait for processing
  2) Resolve project UUID and fetch metrics
  3) Trigger analysis and wait for completion
EOF
}

# Abort early when a required external command is absent.
require_cmd() {
  command -v "$1" >/dev/null 2>&1 || {
    echo "Missing required command: $1" >&2
    exit 1
  }
}

require_cmd curl
require_cmd jq

bom_file=""
project_suffix=""
repo="${GITHUB_REPOSITORY:-}"
pr_number="${PR_NUMBER:-}"
ref_name="${GITHUB_REF_NAME:-main}"
dtrack_url="${DTRACK_URL:-}"
dtrack_api_key="${DTRACK_API_KEY:-}"
wait_seconds=5
max_wait_attempts=120

while [[ $# -gt 0 ]]; do
  case "$1" in
    --bom-file) bom_file="${2:-}"; shift 2 ;;
    --project-suffix) project_suffix="${2:-}"; shift 2 ;;
    --repo) repo="${2:-}"; shift 2 ;;
    --pr-number) pr_number="${2:-}"; shift 2 ;;
    --ref-name) ref_name="${2:-}"; shift 2 ;;
    --dtrack-url) dtrack_url="${2:-}"; shift 2 ;;
    --dtrack-api-key) dtrack_api_key="${2:-}"; shift 2 ;;
    --wait-seconds) wait_seconds="${2:-}"; shift 2 ;;
    --max-wait-attempts) max_wait_attempts="${2:-}"; shift 2 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "Unknown argument: $1" >&2; usage; exit 1 ;;
  esac
done

[[ -n "$bom_file" ]] || { echo "--bom-file is required" >&2; exit 1; }
[[ -n "$project_suffix" ]] || { echo "--project-suffix is required" >&2; exit 1; }
[[ -f "$bom_file" ]] || { echo "$bom_file not found" >&2; exit 1; }
[[ -n "$repo" ]] || { echo "Missing repo (--repo or GITHUB_REPOSITORY)" >&2; exit 1; }
[[ -n "$dtrack_url" ]] || { echo "Missing dtrack url (--dtrack-url or DTRACK_URL)" >&2; exit 1; }
[[ -n "$dtrack_api_key" ]] || { echo "Missing dtrack api key (--dtrack-api-key or DTRACK_API_KEY)" >&2; exit 1; }
[[ "$wait_seconds" =~ ^[0-9]+$ ]] || { echo "--wait-seconds must be numeric" >&2; exit 1; }
[[ "$max_wait_attempts" =~ ^[0-9]+$ ]] || { echo "--max-wait-attempts must be numeric" >&2; exit 1; }

# Strip any trailing slash so URL concatenation stays clean.
dtrack_url="${dtrack_url%/}"
project_name="${repo}-${project_suffix}"

# PR uploads get an isolated pr-<n> project version; branch uploads use the
# branch name.
if [[ -n "$pr_number" ]]; then
  project_version="pr-${pr_number}"
else
  project_version="${ref_name:-main}"
fi

token="$(
  curl -sSf -X POST "$dtrack_url/api/v1/bom" \
    -H "X-Api-Key: $dtrack_api_key" \
    -F "projectName=$project_name" \
    -F "projectVersion=$project_version" \
    -F "autoCreate=true" \
    -F "bom=@$bom_file" \
    | jq -r '.token // empty'
)"

[[ -n "$token" ]] || { echo "Dependency-Track upload returned empty token" >&2; exit 1; }
echo "Dependency-Track token: $token"

echo "Waiting for Dependency-Track to finish processing..."
processing="true"
for ((i=1; i<=max_wait_attempts; i++)); do
  processing="$(
    curl -sSf "$dtrack_url/api/v1/bom/token/$token" \
      -H "X-Api-Key: $dtrack_api_key" \
      | jq -r '.processing'
  )"
  if [[ "$processing" == "false" ]]; then
    echo "Processing finished."
    break
  fi
  sleep "$wait_seconds"
done

[[ "$processing" == "false" ]] || { echo "Timed out waiting for BOM processing" >&2; exit 1; }

# URL-encode name/version for the lookup query string.
project_name_enc="$(printf '%s' "$project_name" | jq -sRr @uri)"
project_version_enc="$(printf '%s' "$project_version" | jq -sRr @uri)"

project="$(
  curl -sSf \
    "$dtrack_url/api/v1/project/lookup?name=${project_name_enc}&version=${project_version_enc}" \
    -H "X-Api-Key: $dtrack_api_key"
)"

project_uuid="$(echo "$project" | jq -r '.uuid // empty')"
[[ -n "$project_uuid" ]] || { echo "Could not resolve project UUID: $project" >&2; exit 1; }
echo "Project UUID: $project_uuid"

metrics="$(
  curl -sSf "$dtrack_url/api/v1/metrics/project/$project_uuid/current" \
    -H "X-Api-Key: $dtrack_api_key"
)"

critical="$(echo "$metrics" | jq -r '.critical // 0' | tr -d '\r\n ')"
high="$(echo "$metrics" | jq -r '.high // 0' | tr -d '\r\n ')"

[[ "$critical" =~ ^[0-9]+$ ]] || { echo "Invalid critical metric: $critical" >&2; exit 1; }
[[ "$high" =~ ^[0-9]+$ ]] || { echo "Invalid high metric: $high" >&2; exit 1; }

echo "=== Metrics ==="
echo "$metrics" | jq '{critical, high, medium, low, unassigned, vulnerabilities, vulnerableComponents, components}'
echo "Collected gate values: critical=[$critical] high=[$high]"

analysis_token="$(
  curl -sSf -X POST "$dtrack_url/api/v1/finding/project/$project_uuid/analyze" \
    -H "X-Api-Key: $dtrack_api_key" \
    | jq -r '.token // empty'
)"
[[ -n "$analysis_token" ]] || { echo "Dependency-Track analysis token was empty" >&2; exit 1; }

echo "Waiting for Dependency-Track analysis to finish..."
analysis_processing="true"
for ((i=1; i<=max_wait_attempts; i++)); do
  analysis_processing="$(
    curl -sSf "$dtrack_url/api/v1/event/token/$analysis_token" \
      -H "X-Api-Key: $dtrack_api_key" \
      | jq -r '.processing'
  )"
  if [[ "$analysis_processing" == "false" ]]; then
    echo "Analysis finished."
    break
  fi
  sleep "$wait_seconds"
done

[[ "$analysis_processing" == "false" ]] || { echo "Timed out waiting for Dependency-Track analysis" >&2; exit 1; }

# Publish gate values for downstream workflow steps.
if [[ -n "${GITHUB_OUTPUT:-}" ]]; then
  {
    echo "project_uuid=$project_uuid"
    echo "critical=$critical"
    echo "high=$high"
  } >> "$GITHUB_OUTPUT"
fi
From 3d99e9460aecc749599311e7c795ae3ad509ab58 Mon Sep 17 00:00:00 2001
From: idelcano
Date: Mon, 9 Mar 2026 08:28:45 +0100
Subject: [PATCH 02/16] added bom generation actions from other repo

---
 .github/workflows/generate_bom_syft.yml | 257 +++++++++++++++++++++++
 .github/workflows/generate_bom_v4.yml   | 262 ++++++++++++++++++++++++
 2 files changed, 519 insertions(+)
 create mode 100644 .github/workflows/generate_bom_syft.yml
 create mode 100644 .github/workflows/generate_bom_v4.yml

diff --git a/.github/workflows/generate_bom_syft.yml b/.github/workflows/generate_bom_syft.yml
new file mode 100644
index 0000000..f29203d
--- /dev/null
+++ b/.github/workflows/generate_bom_syft.yml
@@ -0,0 +1,257 @@
name: Syft SBOM Runner

on:
  workflow_call:
  workflow_dispatch:
  pull_request:
    types:
      - opened
      - reopened
      - synchronize

concurrency:
  group: dtrack-${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  generatebom:
    runs-on: bom
    permissions:
      contents: read
      actions: read
      security-events: write
    steps:
      - name: Resolve code scanning target (ref/sha)
        id: code_scanning_target
        run: |
          set -euo pipefail
          ref="${GITHUB_REF}"
          sha="${GITHUB_SHA}"

          pr_head_ref="$(jq -r '.pull_request.head.ref // empty' "$GITHUB_EVENT_PATH")"
          pr_head_sha="$(jq -r '.pull_request.head.sha // empty' "$GITHUB_EVENT_PATH")"

          if [ -n "$pr_head_ref" ] && [ -n "$pr_head_sha" ]; then
            ref="refs/heads/$pr_head_ref"
            sha="$pr_head_sha"
          fi

          echo "code_scanning_ref=$ref" >> "$GITHUB_OUTPUT"
          echo "code_scanning_sha=$sha" >> "$GITHUB_OUTPUT"
          echo "Resolved code scanning target: ref=$ref sha=$sha"

      - uses:
actions/checkout@v4 + with: + ref: ${{ steps.code_scanning_target.outputs.code_scanning_sha }} + + - name: Read node version (fallback 20) + run: | + if [ -f .nvmrc ]; then + echo "NODE_VERSION=$(tr -d 'v' < .nvmrc)" >> "$GITHUB_ENV" + else + echo "NODE_VERSION=20" >> "$GITHUB_ENV" + fi + + - name: Yarn install + SBOM (container) + run: | + podman run --rm \ + -v "$GITHUB_WORKSPACE:/work" -w /work \ + node:${NODE_VERSION}-bullseye \ + bash -lc ' + set -e + corepack enable + yarn install + + # Install syft in the container + curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh \ + | sh -s -- -b /usr/local/bin + + # Generate CycloneDX JSON SBOM + syft . -o cyclonedx-json=bom_syft.json + ' + + - name: Upload BOM, analyze, and fetch metrics + id: dtrack_syft + env: + DTRACK_URL: ${{ vars.DTRACK_URL }} + DTRACK_API_KEY: ${{ secrets.DTRACK_API_KEY }} + PR_NUMBER: ${{ github.event.pull_request.number }} + run: | + set -euo pipefail + scripts/upload_bom_and_fetch_metrics.sh \ + --bom-file bom_syft.json \ + --project-suffix syft + + - name: Export SARIF findings (syft) + env: + DTRACK_URL: ${{ vars.DTRACK_URL }} + DTRACK_API_KEY: ${{ secrets.DTRACK_API_KEY }} + PROJECT_UUID: ${{ steps.dtrack_syft.outputs.project_uuid }} + run: | + set -euo pipefail + + curl -sSf "$DTRACK_URL/api/v1/finding/project/$PROJECT_UUID?suppressed=false" \ + -H "X-Api-Key: $DTRACK_API_KEY" \ + -H "Accept: application/sarif+json" \ + -o dtrack-syft.sarif + + test -s dtrack-syft.sarif || { echo "::error::dtrack-syft.sarif is missing or empty"; exit 1; } + jq -e '.runs and (.runs | type == "array")' dtrack-syft.sarif >/dev/null + + - name: Export VEX and VDR JSON for SARIF mapping (syft) + env: + DTRACK_URL: ${{ vars.DTRACK_URL }} + DTRACK_API_KEY: ${{ secrets.DTRACK_API_KEY }} + PROJECT_UUID: ${{ steps.dtrack_syft.outputs.project_uuid }} + run: | + set -euo pipefail + + curl -sSf "$DTRACK_URL/api/v1/vex/cyclonedx/project/$PROJECT_UUID" \ + -H "X-Api-Key: $DTRACK_API_KEY" \ + -o 
vex_syft.json + + curl -sSf "$DTRACK_URL/api/v1/bom/cyclonedx/project/$PROJECT_UUID?variant=vdr&format=JSON" \ + -H "X-Api-Key: $DTRACK_API_KEY" \ + -o vdr_syft.json + + test -s vex_syft.json || { echo "::error::vex_syft.json is missing or empty"; exit 1; } + test -s vdr_syft.json || { echo "::error::vdr_syft.json is missing or empty"; exit 1; } + + - name: Normalize SARIF for GitHub Code Scanning (syft) + run: | + set -euo pipefail + + python3 scripts/normalize_sarif.py \ + --input-sarif dtrack-syft.sarif \ + --output-sarif dtrack-syft.sarif \ + --vdr vdr_syft.json \ + --vex vex_syft.json \ + --source syft \ + --tool-name "OWASP Dependency-Track (syft)" \ + --rule-id-namespace "syft::" \ + --location-mode fallback \ + --fallback-uri bom.json \ + --fallback-line 1 + + jq -e '.runs[0].results[0].locations[0].physicalLocation.artifactLocation.uri' dtrack-syft.sarif >/dev/null + + - name: Upload SARIF to GitHub Code Scanning (syft) + uses: github/codeql-action/upload-sarif@v4 + with: + sarif_file: dtrack-syft.sarif + category: dependency-track-syft + ref: ${{ steps.code_scanning_target.outputs.code_scanning_ref }} + sha: ${{ steps.code_scanning_target.outputs.code_scanning_sha }} + + - name: Compare with GitHub open alert instances (syft) + id: open_alerts_syft + env: + GH_TOKEN: ${{ github.token }} + TOOL_NAME: OWASP Dependency-Track (syft) + BASE_REF: ${{ github.base_ref }} + HEAD_REF_NAME: ${{ github.head_ref }} + run: | + set -euo pipefail + scripts/report_open_code_scanning_alert_instances.sh \ + --repo "$GITHUB_REPOSITORY" \ + --tool-name "$TOOL_NAME" \ + --head-ref "refs/heads/${HEAD_REF_NAME:-${GITHUB_REF_NAME}}" \ + --base-ref "refs/heads/${BASE_REF:-${GITHUB_REF_NAME}}" \ + --output-prefix "syft_open_alerts" + + - name: Report code scanning summary (syft) + run: | + set -euo pipefail + + sarif_uploaded_total="$( + jq -r ' + [ + .runs[]?.results[]? 
+ | [ + (.ruleId // ""), + (.properties.name // "unknown-package"), + (.properties.version // "") + ] + | join("|") + ] + | unique + | length + ' dtrack-syft.sarif + )" + ui_open_total="${{ steps.open_alerts_syft.outputs.syft_open_alerts_head_instances_count }}" + ui_open_critical="${{ steps.open_alerts_syft.outputs.syft_open_alerts_head_critical_count }}" + ui_open_high="${{ steps.open_alerts_syft.outputs.syft_open_alerts_head_high_count }}" + ui_open_medium="${{ steps.open_alerts_syft.outputs.syft_open_alerts_head_medium_count }}" + ui_open_low="${{ steps.open_alerts_syft.outputs.syft_open_alerts_head_low_count }}" + ui_open_unknown="${{ steps.open_alerts_syft.outputs.syft_open_alerts_head_unknown_count }}" + ui_new_total="${{ steps.open_alerts_syft.outputs.syft_open_alerts_introduced_instances_count }}" + ui_new_critical="${{ steps.open_alerts_syft.outputs.syft_open_alerts_introduced_instances_critical_count }}" + ui_new_high="${{ steps.open_alerts_syft.outputs.syft_open_alerts_introduced_instances_high_count }}" + ui_new_medium="${{ steps.open_alerts_syft.outputs.syft_open_alerts_introduced_instances_medium_count }}" + ui_new_low="${{ steps.open_alerts_syft.outputs.syft_open_alerts_introduced_instances_low_count }}" + ui_new_unknown="${{ steps.open_alerts_syft.outputs.syft_open_alerts_introduced_instances_unknown_count }}" + ui_baseline_missing="${{ steps.open_alerts_syft.outputs.syft_open_alerts_baseline_missing }}" + branch_name="${GITHUB_HEAD_REF:-$GITHUB_REF_NAME}" + query="is:open branch:${branch_name} tool:\"OWASP Dependency-Track (syft)\"" + branch_url="https://github.com/${GITHUB_REPOSITORY}/security/code-scanning?query=$(jq -rn --arg value "$query" '$value|@uri')" + + sarif_uploaded_total="${sarif_uploaded_total:-0}" + ui_open_total="${ui_open_total:-0}" + ui_open_critical="${ui_open_critical:-0}" + ui_open_high="${ui_open_high:-0}" + ui_open_medium="${ui_open_medium:-0}" + ui_open_low="${ui_open_low:-0}" + ui_open_unknown="${ui_open_unknown:-0}" + 
ui_new_total="${ui_new_total:-0}" + ui_new_critical="${ui_new_critical:-0}" + ui_new_high="${ui_new_high:-0}" + ui_new_medium="${ui_new_medium:-0}" + ui_new_low="${ui_new_low:-0}" + ui_new_unknown="${ui_new_unknown:-0}" + ui_baseline_missing="${ui_baseline_missing:-false}" + + echo "::notice::Syft (GitHub alert instances) new vs base: critical=$ui_new_critical high=$ui_new_high total=$ui_new_total baseline_missing=$ui_baseline_missing" + echo "### Syft code scanning" >> "$GITHUB_STEP_SUMMARY" + echo "- SARIF findings uploaded: $sarif_uploaded_total" >> "$GITHUB_STEP_SUMMARY" + echo "- Open alert instances in branch (GitHub UI):" >> "$GITHUB_STEP_SUMMARY" + echo " - critical: $ui_open_critical" >> "$GITHUB_STEP_SUMMARY" + echo " - high: $ui_open_high" >> "$GITHUB_STEP_SUMMARY" + echo " - medium: $ui_open_medium" >> "$GITHUB_STEP_SUMMARY" + echo " - low: $ui_open_low" >> "$GITHUB_STEP_SUMMARY" + echo " - unknown: $ui_open_unknown" >> "$GITHUB_STEP_SUMMARY" + echo " - total: $ui_open_total" >> "$GITHUB_STEP_SUMMARY" + echo "- New alert instances vs base (head - base):" >> "$GITHUB_STEP_SUMMARY" + echo " - critical: $ui_new_critical" >> "$GITHUB_STEP_SUMMARY" + echo " - high: $ui_new_high" >> "$GITHUB_STEP_SUMMARY" + echo " - medium: $ui_new_medium" >> "$GITHUB_STEP_SUMMARY" + echo " - low: $ui_new_low" >> "$GITHUB_STEP_SUMMARY" + echo " - unknown: $ui_new_unknown" >> "$GITHUB_STEP_SUMMARY" + echo " - total: $ui_new_total" >> "$GITHUB_STEP_SUMMARY" + echo "- Baseline missing for instance comparison (base has no open instances): $ui_baseline_missing" >> "$GITHUB_STEP_SUMMARY" + echo "- Branch alerts URL: $branch_url" >> "$GITHUB_STEP_SUMMARY" + + - name: Enforce newly introduced vulnerability gate (critical/high) + run: | + set -euo pipefail + + crit="${{ steps.open_alerts_syft.outputs.syft_open_alerts_introduced_instances_critical_count }}" + high="${{ steps.open_alerts_syft.outputs.syft_open_alerts_introduced_instances_high_count }}" + med="${{ 
steps.open_alerts_syft.outputs.syft_open_alerts_introduced_instances_medium_count }}" + low="${{ steps.open_alerts_syft.outputs.syft_open_alerts_introduced_instances_low_count }}" + branch_name="${GITHUB_HEAD_REF:-$GITHUB_REF_NAME}" + query="is:open branch:${branch_name} tool:\"OWASP Dependency-Track (syft)\"" + branch_url="https://github.com/${GITHUB_REPOSITORY}/security/code-scanning?query=$(jq -rn --arg value "$query" '$value|@uri')" + + crit="${crit:-0}" + high="${high:-0}" + med="${med:-0}" + low="${low:-0}" + + echo "New alert instances detected vs base (head - base): critical=$crit high=$high medium=$med low=$low" + + if [ "$crit" -gt 0 ] || [ "$high" -gt 0 ]; then + echo "::error::New critical/high alert instances detected vs base (critical=$crit, high=$high, medium=$med, low=$low). Review: $branch_url" + exit 1 + fi + + echo "No new critical/high alert instances detected vs base" diff --git a/.github/workflows/generate_bom_v4.yml b/.github/workflows/generate_bom_v4.yml new file mode 100644 index 0000000..d37e196 --- /dev/null +++ b/.github/workflows/generate_bom_v4.yml @@ -0,0 +1,262 @@ +name: Generate Bom Yarn 4 CycloneDX + +on: + workflow_call: + workflow_dispatch: + pull_request: + types: + - opened + - reopened + - synchronize + +concurrency: + group: dtrack-${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + generatebomv4: + runs-on: bom + permissions: + contents: read + actions: read + security-events: write + steps: + - name: Resolve code scanning target (ref/sha) + id: code_scanning_target + run: | + set -euo pipefail + ref="${GITHUB_REF}" + sha="${GITHUB_SHA}" + + pr_head_ref="$(jq -r '.pull_request.head.ref // empty' "$GITHUB_EVENT_PATH")" + pr_head_sha="$(jq -r '.pull_request.head.sha // empty' "$GITHUB_EVENT_PATH")" + + if [ -n "$pr_head_ref" ] && [ -n "$pr_head_sha" ]; then + ref="refs/heads/$pr_head_ref" + sha="$pr_head_sha" + fi + + echo "code_scanning_ref=$ref" >> "$GITHUB_OUTPUT" + echo "code_scanning_sha=$sha" 
>> "$GITHUB_OUTPUT" + echo "Resolved code scanning target: ref=$ref sha=$sha" + + - uses: actions/checkout@v4 + with: + ref: ${{ steps.code_scanning_target.outputs.code_scanning_sha }} + + - name: Read node version (min 20.18.0 for CycloneDX) + run: | + if [ -f .nvmrc ]; then + ver="$(tr -d 'v' < .nvmrc)" + else + ver="20.18.0" + fi + min="20.18.0" + if [ "$(printf '%s\n' "$ver" "$min" | sort -V | head -n1)" = "$ver" ] && [ "$ver" != "$min" ]; then + ver="$min" + fi + echo "NODE_VERSION=$ver" >> "$GITHUB_ENV" + + - name: Yarn Berry + CycloneDX (container) + run: | + podman run --rm \ + -v "$GITHUB_WORKSPACE:/work" -w /work \ + node:${NODE_VERSION}-bullseye \ + bash -lc ' + corepack enable + yarn set version berry + if [ ! -f src/package.json ]; then + printf "{\n \"name\": \"local-src\",\n \"version\": \"0.0.0\"\n}\n" > src/package.json + fi + # CycloneDX rejects invalid package names like "$"; strip for BOM generation only. + cp package.json /tmp/package.json.orig + node -e "const fs=require(\"fs\");const pkg=JSON.parse(fs.readFileSync(\"package.json\",\"utf8\"));if(pkg.dependencies&&pkg.dependencies[\"\\$\"]){delete pkg.dependencies[\"\\$\"];};fs.writeFileSync(\"package.json\",JSON.stringify(pkg,null,2)+\"\\n\");" + yarn install + yarn dlx -q @cyclonedx/yarn-plugin-cyclonedx --output-file bom_berry.json --output-format JSON + mv /tmp/package.json.orig package.json ' + + - name: Upload BOM, analyze, and fetch metrics + id: dtrack_yarn4 + env: + DTRACK_URL: ${{ vars.DTRACK_URL }} + DTRACK_API_KEY: ${{ secrets.DTRACK_API_KEY }} + PR_NUMBER: ${{ github.event.pull_request.number }} + run: | + set -euo pipefail + scripts/upload_bom_and_fetch_metrics.sh \ + --bom-file bom_berry.json \ + --project-suffix berry + + - name: Export SARIF findings (yarn4) + env: + DTRACK_URL: ${{ vars.DTRACK_URL }} + DTRACK_API_KEY: ${{ secrets.DTRACK_API_KEY }} + PROJECT_UUID: ${{ steps.dtrack_yarn4.outputs.project_uuid }} + run: | + set -euo pipefail + + curl -sSf 
"$DTRACK_URL/api/v1/finding/project/$PROJECT_UUID?suppressed=false" \ + -H "X-Api-Key: $DTRACK_API_KEY" \ + -H "Accept: application/sarif+json" \ + -o dtrack-yarn4.sarif + + test -s dtrack-yarn4.sarif || { echo "::error::dtrack-yarn4.sarif is missing or empty"; exit 1; } + jq -e '.runs and (.runs | type == "array")' dtrack-yarn4.sarif >/dev/null + + - name: Export VEX and VDR JSON for SARIF mapping (yarn4) + env: + DTRACK_URL: ${{ vars.DTRACK_URL }} + DTRACK_API_KEY: ${{ secrets.DTRACK_API_KEY }} + PROJECT_UUID: ${{ steps.dtrack_yarn4.outputs.project_uuid }} + run: | + set -euo pipefail + + curl -sSf "$DTRACK_URL/api/v1/vex/cyclonedx/project/$PROJECT_UUID" \ + -H "X-Api-Key: $DTRACK_API_KEY" \ + -o vex_yarn4.json + + curl -sSf "$DTRACK_URL/api/v1/bom/cyclonedx/project/$PROJECT_UUID?variant=vdr&format=JSON" \ + -H "X-Api-Key: $DTRACK_API_KEY" \ + -o bom_yarn4.json + + test -s vex_yarn4.json || { echo "::error::vex_yarn4.json is missing or empty"; exit 1; } + test -s bom_yarn4.json || { echo "::error::bom_yarn4.json is missing or empty"; exit 1; } + + - name: Normalize SARIF for GitHub Code Scanning (yarn4) + run: | + set -euo pipefail + + python3 scripts/normalize_sarif.py \ + --input-sarif dtrack-yarn4.sarif \ + --output-sarif dtrack-yarn4.sarif \ + --vdr bom_yarn4.json \ + --vex vex_yarn4.json \ + --source yarn4 \ + --tool-name "OWASP Dependency-Track (yarn4)" \ + --rule-id-namespace "yarn4::" \ + --location-mode fallback \ + --fallback-uri bom.json \ + --fallback-line 1 + + jq -e '.runs[0].results[0].locations[0].physicalLocation.artifactLocation.uri' dtrack-yarn4.sarif >/dev/null + + - name: Upload SARIF to GitHub Code Scanning (yarn4) + uses: github/codeql-action/upload-sarif@v4 + with: + sarif_file: dtrack-yarn4.sarif + category: dependency-track-yarn4 + ref: ${{ steps.code_scanning_target.outputs.code_scanning_ref }} + sha: ${{ steps.code_scanning_target.outputs.code_scanning_sha }} + + - name: Compare with GitHub open alert instances (yarn4) + id: 
open_alerts_yarn4 + env: + GH_TOKEN: ${{ github.token }} + TOOL_NAME: OWASP Dependency-Track (yarn4) + BASE_REF: ${{ github.base_ref }} + HEAD_REF_NAME: ${{ github.head_ref }} + run: | + set -euo pipefail + scripts/report_open_code_scanning_alert_instances.sh \ + --repo "$GITHUB_REPOSITORY" \ + --tool-name "$TOOL_NAME" \ + --head-ref "refs/heads/${HEAD_REF_NAME:-${GITHUB_REF_NAME}}" \ + --base-ref "refs/heads/${BASE_REF:-${GITHUB_REF_NAME}}" \ + --output-prefix "yarn4_open_alerts" + + - name: Report code scanning summary (yarn4) + run: | + set -euo pipefail + + sarif_uploaded_total="$( + jq -r ' + [ + .runs[]?.results[]? + | [ + (.ruleId // ""), + (.properties.name // "unknown-package"), + (.properties.version // "") + ] + | join("|") + ] + | unique + | length + ' dtrack-yarn4.sarif + )" + ui_open_total="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_head_instances_count }}" + ui_open_critical="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_head_critical_count }}" + ui_open_high="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_head_high_count }}" + ui_open_medium="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_head_medium_count }}" + ui_open_low="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_head_low_count }}" + ui_open_unknown="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_head_unknown_count }}" + ui_new_total="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_introduced_instances_count }}" + ui_new_critical="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_introduced_instances_critical_count }}" + ui_new_high="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_introduced_instances_high_count }}" + ui_new_medium="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_introduced_instances_medium_count }}" + ui_new_low="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_introduced_instances_low_count }}" + ui_new_unknown="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_introduced_instances_unknown_count 
}}" + ui_baseline_missing="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_baseline_missing }}" + branch_name="${GITHUB_HEAD_REF:-$GITHUB_REF_NAME}" + query="is:open branch:${branch_name} tool:\"OWASP Dependency-Track (yarn4)\"" + branch_url="https://github.com/${GITHUB_REPOSITORY}/security/code-scanning?query=$(jq -rn --arg value "$query" '$value|@uri')" + + sarif_uploaded_total="${sarif_uploaded_total:-0}" + ui_open_total="${ui_open_total:-0}" + ui_open_critical="${ui_open_critical:-0}" + ui_open_high="${ui_open_high:-0}" + ui_open_medium="${ui_open_medium:-0}" + ui_open_low="${ui_open_low:-0}" + ui_open_unknown="${ui_open_unknown:-0}" + ui_new_total="${ui_new_total:-0}" + ui_new_critical="${ui_new_critical:-0}" + ui_new_high="${ui_new_high:-0}" + ui_new_medium="${ui_new_medium:-0}" + ui_new_low="${ui_new_low:-0}" + ui_new_unknown="${ui_new_unknown:-0}" + ui_baseline_missing="${ui_baseline_missing:-false}" + + echo "::notice::Yarn4 (GitHub alert instances) new vs base: critical=$ui_new_critical high=$ui_new_high total=$ui_new_total baseline_missing=$ui_baseline_missing" + echo "### Yarn4 code scanning" >> "$GITHUB_STEP_SUMMARY" + echo "- SARIF findings uploaded: $sarif_uploaded_total" >> "$GITHUB_STEP_SUMMARY" + echo "- Open alert instances in branch (GitHub UI):" >> "$GITHUB_STEP_SUMMARY" + echo " - critical: $ui_open_critical" >> "$GITHUB_STEP_SUMMARY" + echo " - high: $ui_open_high" >> "$GITHUB_STEP_SUMMARY" + echo " - medium: $ui_open_medium" >> "$GITHUB_STEP_SUMMARY" + echo " - low: $ui_open_low" >> "$GITHUB_STEP_SUMMARY" + echo " - unknown: $ui_open_unknown" >> "$GITHUB_STEP_SUMMARY" + echo " - total: $ui_open_total" >> "$GITHUB_STEP_SUMMARY" + echo "- New alert instances vs base (head - base):" >> "$GITHUB_STEP_SUMMARY" + echo " - critical: $ui_new_critical" >> "$GITHUB_STEP_SUMMARY" + echo " - high: $ui_new_high" >> "$GITHUB_STEP_SUMMARY" + echo " - medium: $ui_new_medium" >> "$GITHUB_STEP_SUMMARY" + echo " - low: $ui_new_low" >> 
"$GITHUB_STEP_SUMMARY" + echo " - unknown: $ui_new_unknown" >> "$GITHUB_STEP_SUMMARY" + echo " - total: $ui_new_total" >> "$GITHUB_STEP_SUMMARY" + echo "- Baseline missing for instance comparison (base has no open instances): $ui_baseline_missing" >> "$GITHUB_STEP_SUMMARY" + echo "- Branch alerts URL: $branch_url" >> "$GITHUB_STEP_SUMMARY" + + - name: Enforce newly introduced vulnerability gate (critical/high) + run: | + set -euo pipefail + + crit="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_introduced_instances_critical_count }}" + high="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_introduced_instances_high_count }}" + med="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_introduced_instances_medium_count }}" + low="${{ steps.open_alerts_yarn4.outputs.yarn4_open_alerts_introduced_instances_low_count }}" + branch_name="${GITHUB_HEAD_REF:-$GITHUB_REF_NAME}" + query="is:open branch:${branch_name} tool:\"OWASP Dependency-Track (yarn4)\"" + branch_url="https://github.com/${GITHUB_REPOSITORY}/security/code-scanning?query=$(jq -rn --arg value "$query" '$value|@uri')" + + crit="${crit:-0}" + high="${high:-0}" + med="${med:-0}" + low="${low:-0}" + + echo "New alert instances detected vs base (head - base): critical=$crit high=$high medium=$med low=$low" + + if [ "$crit" -gt 0 ] || [ "$high" -gt 0 ]; then + echo "::error::New critical/high alert instances detected vs base (critical=$crit, high=$high, medium=$med, low=$low). 
Review: $branch_url" + exit 1 + fi + + echo "No new critical/high alert instances detected vs base" From 09236a5e5382c2192bc8cdf56aded93653fac65f Mon Sep 17 00:00:00 2001 From: idelcano Date: Mon, 9 Mar 2026 08:31:28 +0100 Subject: [PATCH 03/16] change runner from self-hosted to basic-runner --- .github/workflows/master.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index c073f64..83748d3 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -33,7 +33,7 @@ jobs: if: contains(inputs.workflows_to_run, 'run-tests') || contains(inputs.workflows_to_run, 'all') uses: ./.github/workflows/app-test.yml with: - runner: ${{ inputs.runner || (github.event.repository.private && 'self-hosted') || 'ubuntu-latest' }} + runner: ${{ inputs.runner || (github.event.repository.private && 'basic-runner') || 'ubuntu-latest' }} #skip if private repo codeql-scan: @@ -42,11 +42,11 @@ jobs: with: languages: ${{ inputs.languages }} build_mode: ${{ inputs.build_mode }} - runner: ${{ inputs.runner || (github.event.repository.private && 'self-hosted') || 'ubuntu-latest' }} + runner: ${{ inputs.runner || (github.event.repository.private && 'basic-runner') || 'ubuntu-latest' }} build-check: if: contains(inputs.workflows_to_run, 'build-check') || contains(inputs.workflows_to_run, 'all') uses: ./.github/workflows/bundlemon-build-size.yml with: branch_name: ${{ inputs.bundlemonrc_branch_name }} - runner: ${{ inputs.runner || (github.event.repository.private && 'self-hosted') || 'ubuntu-latest' }} + runner: ${{ inputs.runner || (github.event.repository.private && 'basic-runner') || 'ubuntu-latest' }} From 51334d9db66c3d01a4456d6874964d1adc312572 Mon Sep 17 00:00:00 2001 From: idelcano Date: Mon, 9 Mar 2026 08:39:24 +0100 Subject: [PATCH 04/16] Added bom workflows into master --- .github/workflows/master.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git 
a/.github/workflows/master.yml b/.github/workflows/master.yml index 83748d3..a3d1f44 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -22,6 +22,11 @@ on: description: "Specify runner. Defaults to checking visibility" required: false type: string + bom_runner: + description: "Specify runner for BOM workflows" + required: false + default: "bom" + type: string workflows_to_run: description: 'Comma-separated list of workflows to run (e.g., flow1,flow2)' required: false @@ -50,3 +55,15 @@ jobs: with: branch_name: ${{ inputs.bundlemonrc_branch_name }} runner: ${{ inputs.runner || (github.event.repository.private && 'basic-runner') || 'ubuntu-latest' }} + + generate-bom-v4: + if: contains(inputs.workflows_to_run, 'generate-bom-v4') || contains(inputs.workflows_to_run, 'all') + uses: ./.github/workflows/generate_bom_v4.yml + with: + runner: ${{ inputs.bom_runner }} + + generate-bom-syft: + if: contains(inputs.workflows_to_run, 'generate-bom-syft') || contains(inputs.workflows_to_run, 'all') + uses: ./.github/workflows/generate_bom_syft.yml + with: + runner: ${{ inputs.bom_runner }} From ff4ae1d073d45a1138f33ea72eadf04621bf62c3 Mon Sep 17 00:00:00 2001 From: idelcano Date: Mon, 9 Mar 2026 08:42:14 +0100 Subject: [PATCH 05/16] add dedicated runners for shared workflows and BOM jobs --- .github/workflows/generate_bom_syft.yml | 11 ++++++++++- .github/workflows/generate_bom_v4.yml | 11 ++++++++++- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/.github/workflows/generate_bom_syft.yml b/.github/workflows/generate_bom_syft.yml index f29203d..2cf93ba 100644 --- a/.github/workflows/generate_bom_syft.yml +++ b/.github/workflows/generate_bom_syft.yml @@ -2,6 +2,15 @@ name: Syft SBOM Runner on: workflow_call: + inputs: + runner: + description: "Specify runner for the BOM workflow" + required: false + default: "bom" + type: string + secrets: + DTRACK_API_KEY: + required: true workflow_dispatch: pull_request: types: @@ -15,7 +24,7 @@ 
concurrency: jobs: generatebom: - runs-on: bom + runs-on: ${{ inputs.runner }} permissions: contents: read actions: read diff --git a/.github/workflows/generate_bom_v4.yml b/.github/workflows/generate_bom_v4.yml index d37e196..ee9e745 100644 --- a/.github/workflows/generate_bom_v4.yml +++ b/.github/workflows/generate_bom_v4.yml @@ -2,6 +2,15 @@ name: Generate Bom Yarn 4 CycloneDX on: workflow_call: + inputs: + runner: + description: "Specify runner for the BOM workflow" + required: false + default: "bom" + type: string + secrets: + DTRACK_API_KEY: + required: true workflow_dispatch: pull_request: types: @@ -15,7 +24,7 @@ concurrency: jobs: generatebomv4: - runs-on: bom + runs-on: ${{ inputs.runner }} permissions: contents: read actions: read From fc3acc85ac8bdbf812c2487d2548b9afab51c684 Mon Sep 17 00:00:00 2001 From: idelcano Date: Mon, 9 Mar 2026 08:47:06 +0100 Subject: [PATCH 06/16] load shared BOM scripts from central workflows repo --- .github/workflows/generate_bom_syft.yml | 15 +++++++++++---- .github/workflows/generate_bom_v4.yml | 15 +++++++++++---- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/.github/workflows/generate_bom_syft.yml b/.github/workflows/generate_bom_syft.yml index 2cf93ba..87183af 100644 --- a/.github/workflows/generate_bom_syft.yml +++ b/.github/workflows/generate_bom_syft.yml @@ -49,10 +49,17 @@ jobs: echo "code_scanning_sha=$sha" >> "$GITHUB_OUTPUT" echo "Resolved code scanning target: ref=$ref sha=$sha" - - uses: actions/checkout@v4 + - name: Checkout caller repository + uses: actions/checkout@v4 with: ref: ${{ steps.code_scanning_target.outputs.code_scanning_sha }} + - name: Checkout shared workflows repository + uses: actions/checkout@v4 + with: + repository: EyeSeeTea/github-workflows + path: shared-workflows + - name: Read node version (fallback 20) run: | if [ -f .nvmrc ]; then @@ -87,7 +94,7 @@ jobs: PR_NUMBER: ${{ github.event.pull_request.number }} run: | set -euo pipefail - 
scripts/upload_bom_and_fetch_metrics.sh \ + bash shared-workflows/scripts/upload_bom_and_fetch_metrics.sh \ --bom-file bom_syft.json \ --project-suffix syft @@ -130,7 +137,7 @@ jobs: run: | set -euo pipefail - python3 scripts/normalize_sarif.py \ + python3 shared-workflows/scripts/normalize_sarif.py \ --input-sarif dtrack-syft.sarif \ --output-sarif dtrack-syft.sarif \ --vdr vdr_syft.json \ @@ -161,7 +168,7 @@ jobs: HEAD_REF_NAME: ${{ github.head_ref }} run: | set -euo pipefail - scripts/report_open_code_scanning_alert_instances.sh \ + bash shared-workflows/scripts/report_open_code_scanning_alert_instances.sh \ --repo "$GITHUB_REPOSITORY" \ --tool-name "$TOOL_NAME" \ --head-ref "refs/heads/${HEAD_REF_NAME:-${GITHUB_REF_NAME}}" \ diff --git a/.github/workflows/generate_bom_v4.yml b/.github/workflows/generate_bom_v4.yml index ee9e745..480bdc3 100644 --- a/.github/workflows/generate_bom_v4.yml +++ b/.github/workflows/generate_bom_v4.yml @@ -49,10 +49,17 @@ jobs: echo "code_scanning_sha=$sha" >> "$GITHUB_OUTPUT" echo "Resolved code scanning target: ref=$ref sha=$sha" - - uses: actions/checkout@v4 + - name: Checkout caller repository + uses: actions/checkout@v4 with: ref: ${{ steps.code_scanning_target.outputs.code_scanning_sha }} + - name: Checkout shared workflows repository + uses: actions/checkout@v4 + with: + repository: EyeSeeTea/github-workflows + path: shared-workflows + - name: Read node version (min 20.18.0 for CycloneDX) run: | if [ -f .nvmrc ]; then @@ -92,7 +99,7 @@ jobs: PR_NUMBER: ${{ github.event.pull_request.number }} run: | set -euo pipefail - scripts/upload_bom_and_fetch_metrics.sh \ + bash shared-workflows/scripts/upload_bom_and_fetch_metrics.sh \ --bom-file bom_berry.json \ --project-suffix berry @@ -135,7 +142,7 @@ jobs: run: | set -euo pipefail - python3 scripts/normalize_sarif.py \ + python3 shared-workflows/scripts/normalize_sarif.py \ --input-sarif dtrack-yarn4.sarif \ --output-sarif dtrack-yarn4.sarif \ --vdr bom_yarn4.json \ @@ -166,7 +173,7 
@@ jobs: HEAD_REF_NAME: ${{ github.head_ref }} run: | set -euo pipefail - scripts/report_open_code_scanning_alert_instances.sh \ + bash shared-workflows/scripts/report_open_code_scanning_alert_instances.sh \ --repo "$GITHUB_REPOSITORY" \ --tool-name "$TOOL_NAME" \ --head-ref "refs/heads/${HEAD_REF_NAME:-${GITHUB_REF_NAME}}" \ From 77bf4456b5a5e18c57a29f44937df8379c589377 Mon Sep 17 00:00:00 2001 From: idelcano Date: Mon, 9 Mar 2026 09:20:35 +0100 Subject: [PATCH 07/16] execute only on PR --- .github/workflows/master.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index a3d1f44..ddd35b0 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -57,13 +57,13 @@ jobs: runner: ${{ inputs.runner || (github.event.repository.private && 'basic-runner') || 'ubuntu-latest' }} generate-bom-v4: - if: contains(inputs.workflows_to_run, 'generate-bom-v4') || contains(inputs.workflows_to_run, 'all') + if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'generate-bom-v4') || contains(inputs.workflows_to_run, 'all')) uses: ./.github/workflows/generate_bom_v4.yml with: runner: ${{ inputs.bom_runner }} generate-bom-syft: - if: contains(inputs.workflows_to_run, 'generate-bom-syft') || contains(inputs.workflows_to_run, 'all') + if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'generate-bom-syft') || contains(inputs.workflows_to_run, 'all')) uses: ./.github/workflows/generate_bom_syft.yml with: runner: ${{ inputs.bom_runner }} From bd73c17bb021f5097cd5e0f6f8510d1ec3881e6a Mon Sep 17 00:00:00 2001 From: idelcano Date: Tue, 10 Mar 2026 08:08:59 +0100 Subject: [PATCH 08/16] refactor names --- ...bom_syft.yml => dependency-track-syft.yml} | 10 ++++----- ..._bom_v4.yml => dependency-track-yarn4.yml} | 10 ++++----- .github/workflows/master.yml | 21 ++++++++++++------- 3 files changed, 24 insertions(+), 17 deletions(-) rename 
.github/workflows/{generate_bom_syft.yml => dependency-track-syft.yml} (96%) rename .github/workflows/{generate_bom_v4.yml => dependency-track-yarn4.yml} (97%) diff --git a/.github/workflows/generate_bom_syft.yml b/.github/workflows/dependency-track-syft.yml similarity index 96% rename from .github/workflows/generate_bom_syft.yml rename to .github/workflows/dependency-track-syft.yml index 87183af..dca048a 100644 --- a/.github/workflows/generate_bom_syft.yml +++ b/.github/workflows/dependency-track-syft.yml @@ -1,4 +1,4 @@ -name: Syft SBOM Runner +name: Dependency-Track Syft Scan on: workflow_call: @@ -6,7 +6,7 @@ on: runner: description: "Specify runner for the BOM workflow" required: false - default: "bom" + default: "dependency-track-scan" type: string secrets: DTRACK_API_KEY: @@ -23,7 +23,7 @@ concurrency: cancel-in-progress: true jobs: - generatebom: + dependency-track-syft: runs-on: ${{ inputs.runner }} permissions: contents: read @@ -226,8 +226,8 @@ jobs: ui_new_unknown="${ui_new_unknown:-0}" ui_baseline_missing="${ui_baseline_missing:-false}" - echo "::notice::Syft (GitHub alert instances) new vs base: critical=$ui_new_critical high=$ui_new_high total=$ui_new_total baseline_missing=$ui_baseline_missing" - echo "### Syft code scanning" >> "$GITHUB_STEP_SUMMARY" + echo "::notice::Dependency-Track Syft (GitHub alert instances) new vs base: critical=$ui_new_critical high=$ui_new_high total=$ui_new_total baseline_missing=$ui_baseline_missing" + echo "### Dependency-Track Syft code scanning" >> "$GITHUB_STEP_SUMMARY" echo "- SARIF findings uploaded: $sarif_uploaded_total" >> "$GITHUB_STEP_SUMMARY" echo "- Open alert instances in branch (GitHub UI):" >> "$GITHUB_STEP_SUMMARY" echo " - critical: $ui_open_critical" >> "$GITHUB_STEP_SUMMARY" diff --git a/.github/workflows/generate_bom_v4.yml b/.github/workflows/dependency-track-yarn4.yml similarity index 97% rename from .github/workflows/generate_bom_v4.yml rename to .github/workflows/dependency-track-yarn4.yml index 
480bdc3..1d33a55 100644 --- a/.github/workflows/generate_bom_v4.yml +++ b/.github/workflows/dependency-track-yarn4.yml @@ -1,4 +1,4 @@ -name: Generate Bom Yarn 4 CycloneDX +name: Dependency-Track Yarn4 Scan on: workflow_call: @@ -6,7 +6,7 @@ on: runner: description: "Specify runner for the BOM workflow" required: false - default: "bom" + default: "dependency-track-scan" type: string secrets: DTRACK_API_KEY: @@ -23,7 +23,7 @@ concurrency: cancel-in-progress: true jobs: - generatebomv4: + dependency-track-yarn4: runs-on: ${{ inputs.runner }} permissions: contents: read @@ -231,8 +231,8 @@ jobs: ui_new_unknown="${ui_new_unknown:-0}" ui_baseline_missing="${ui_baseline_missing:-false}" - echo "::notice::Yarn4 (GitHub alert instances) new vs base: critical=$ui_new_critical high=$ui_new_high total=$ui_new_total baseline_missing=$ui_baseline_missing" - echo "### Yarn4 code scanning" >> "$GITHUB_STEP_SUMMARY" + echo "::notice::Dependency-Track Yarn4 (GitHub alert instances) new vs base: critical=$ui_new_critical high=$ui_new_high total=$ui_new_total baseline_missing=$ui_baseline_missing" + echo "### Dependency-Track Yarn4 code scanning" >> "$GITHUB_STEP_SUMMARY" echo "- SARIF findings uploaded: $sarif_uploaded_total" >> "$GITHUB_STEP_SUMMARY" echo "- Open alert instances in branch (GitHub UI):" >> "$GITHUB_STEP_SUMMARY" echo " - critical: $ui_open_critical" >> "$GITHUB_STEP_SUMMARY" diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index ddd35b0..a11b8f3 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -25,13 +25,16 @@ on: bom_runner: description: "Specify runner for BOM workflows" required: false - default: "bom" + default: "dependency-track-scan" type: string workflows_to_run: description: 'Comma-separated list of workflows to run (e.g., flow1,flow2)' required: false default: 'all' #'run-tests,codeql-scan,build-check' type: string + secrets: + DTRACK_API_KEY: + required: false jobs: run-tests: @@ -56,14 +59,18 @@ jobs: 
branch_name: ${{ inputs.bundlemonrc_branch_name }} runner: ${{ inputs.runner || (github.event.repository.private && 'basic-runner') || 'ubuntu-latest' }} - generate-bom-v4: - if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'generate-bom-v4') || contains(inputs.workflows_to_run, 'all')) - uses: ./.github/workflows/generate_bom_v4.yml + dependency-track-yarn4: + if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'dependency-track-yarn4') || contains(inputs.workflows_to_run, 'all')) + uses: ./.github/workflows/dependency-track-yarn4.yml + secrets: + DTRACK_API_KEY: ${{ secrets.DTRACK_API_KEY }} with: runner: ${{ inputs.bom_runner }} - generate-bom-syft: - if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'generate-bom-syft') || contains(inputs.workflows_to_run, 'all')) - uses: ./.github/workflows/generate_bom_syft.yml + dependency-track-syft: + if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'dependency-track-syft') || contains(inputs.workflows_to_run, 'all')) + uses: ./.github/workflows/dependency-track-syft.yml + secrets: + DTRACK_API_KEY: ${{ secrets.DTRACK_API_KEY }} with: runner: ${{ inputs.bom_runner }} From 7695d3a401accade17827342c298d6a0f7bf2509 Mon Sep 17 00:00:00 2001 From: idelcano Date: Wed, 11 Mar 2026 09:41:46 +0100 Subject: [PATCH 09/16] change tag name --- .github/workflows/dependency-track-syft.yml | 2 +- .github/workflows/dependency-track-yarn4.yml | 2 +- .github/workflows/master.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/dependency-track-syft.yml b/.github/workflows/dependency-track-syft.yml index dca048a..b1f7a60 100644 --- a/.github/workflows/dependency-track-syft.yml +++ b/.github/workflows/dependency-track-syft.yml @@ -6,7 +6,7 @@ on: runner: description: "Specify runner for the BOM workflow" required: false - default: "dependency-track-scan" + default: "dependency-track-runner" 
type: string secrets: DTRACK_API_KEY: diff --git a/.github/workflows/dependency-track-yarn4.yml b/.github/workflows/dependency-track-yarn4.yml index 1d33a55..fa8a8d0 100644 --- a/.github/workflows/dependency-track-yarn4.yml +++ b/.github/workflows/dependency-track-yarn4.yml @@ -6,7 +6,7 @@ on: runner: description: "Specify runner for the BOM workflow" required: false - default: "dependency-track-scan" + default: "dependency-track-runner" type: string secrets: DTRACK_API_KEY: diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index a11b8f3..9629d81 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -60,7 +60,7 @@ jobs: runner: ${{ inputs.runner || (github.event.repository.private && 'basic-runner') || 'ubuntu-latest' }} dependency-track-yarn4: - if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'dependency-track-yarn4') || contains(inputs.workflows_to_run, 'all')) + if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'dependency-track-runner') || contains(inputs.workflows_to_run, 'all')) uses: ./.github/workflows/dependency-track-yarn4.yml secrets: DTRACK_API_KEY: ${{ secrets.DTRACK_API_KEY }} @@ -68,7 +68,7 @@ jobs: runner: ${{ inputs.bom_runner }} dependency-track-syft: - if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'dependency-track-syft') || contains(inputs.workflows_to_run, 'all')) + if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'dependency-track-runner') || contains(inputs.workflows_to_run, 'all')) uses: ./.github/workflows/dependency-track-syft.yml secrets: DTRACK_API_KEY: ${{ secrets.DTRACK_API_KEY }} From 7510d1c5eec213a5c8d2c08a99c8cf122be20c0c Mon Sep 17 00:00:00 2001 From: idelcano Date: Wed, 11 Mar 2026 09:53:21 +0100 Subject: [PATCH 10/16] fix job names --- .github/workflows/master.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git 
a/.github/workflows/master.yml b/.github/workflows/master.yml index 9629d81..e05aa5b 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -41,7 +41,7 @@ jobs: if: contains(inputs.workflows_to_run, 'run-tests') || contains(inputs.workflows_to_run, 'all') uses: ./.github/workflows/app-test.yml with: - runner: ${{ inputs.runner || (github.event.repository.private && 'basic-runner') || 'ubuntu-latest' }} + runner: ${{ inputs.runner || (github.event.repository.private && 'self-runner') || 'ubuntu-latest' }} #skip if private repo codeql-scan: @@ -50,17 +50,17 @@ jobs: with: languages: ${{ inputs.languages }} build_mode: ${{ inputs.build_mode }} - runner: ${{ inputs.runner || (github.event.repository.private && 'basic-runner') || 'ubuntu-latest' }} + runner: ${{ inputs.runner || (github.event.repository.private && 'self-runner') || 'ubuntu-latest' }} build-check: if: contains(inputs.workflows_to_run, 'build-check') || contains(inputs.workflows_to_run, 'all') uses: ./.github/workflows/bundlemon-build-size.yml with: branch_name: ${{ inputs.bundlemonrc_branch_name }} - runner: ${{ inputs.runner || (github.event.repository.private && 'basic-runner') || 'ubuntu-latest' }} + runner: ${{ inputs.runner || (github.event.repository.private && 'self-runner') || 'ubuntu-latest' }} dependency-track-yarn4: - if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'dependency-track-runner') || contains(inputs.workflows_to_run, 'all')) + if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'dependency-track-yarn4') || contains(inputs.workflows_to_run, 'all')) uses: ./.github/workflows/dependency-track-yarn4.yml secrets: DTRACK_API_KEY: ${{ secrets.DTRACK_API_KEY }} @@ -68,7 +68,7 @@ jobs: runner: ${{ inputs.bom_runner }} dependency-track-syft: - if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'dependency-track-runner') || contains(inputs.workflows_to_run, 'all')) + if: 
github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'dependency-track-syft') || contains(inputs.workflows_to_run, 'all')) uses: ./.github/workflows/dependency-track-syft.yml secrets: DTRACK_API_KEY: ${{ secrets.DTRACK_API_KEY }} From a45c700ec68be212decbf8de0d423372af6c2224 Mon Sep 17 00:00:00 2001 From: idelcano Date: Wed, 11 Mar 2026 10:06:59 +0100 Subject: [PATCH 11/16] fix runner name in workflow call --- .github/workflows/master.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index e05aa5b..909dda1 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -25,7 +25,7 @@ on: bom_runner: description: "Specify runner for BOM workflows" required: false - default: "dependency-track-scan" + default: "dependency-track-runner" type: string workflows_to_run: description: 'Comma-separated list of workflows to run (e.g., flow1,flow2)' From 52a081999f3bf50ef41e9b88700274b36f2799a7 Mon Sep 17 00:00:00 2001 From: idelcano Date: Wed, 11 Mar 2026 12:53:02 +0100 Subject: [PATCH 12/16] Separate workflows --- .github/workflows/dependency-track-syft.yml | 2 +- .github/workflows/dependency-track-yarn4.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/dependency-track-syft.yml b/.github/workflows/dependency-track-syft.yml index b1f7a60..b5989c0 100644 --- a/.github/workflows/dependency-track-syft.yml +++ b/.github/workflows/dependency-track-syft.yml @@ -19,7 +19,7 @@ on: - synchronize concurrency: - group: dtrack-${{ github.workflow }}-${{ github.ref }} + group: dtrack-${{ github.workflow }}-syft-${{ github.ref }} cancel-in-progress: true jobs: diff --git a/.github/workflows/dependency-track-yarn4.yml b/.github/workflows/dependency-track-yarn4.yml index fa8a8d0..821fcac 100644 --- a/.github/workflows/dependency-track-yarn4.yml +++ b/.github/workflows/dependency-track-yarn4.yml @@ -19,7 +19,7 @@ on: - synchronize 
concurrency: - group: dtrack-${{ github.workflow }}-${{ github.ref }} + group: dtrack-${{ github.workflow }}-yarn4-${{ github.ref }} cancel-in-progress: true jobs: From 086b2f865144336c2f6825edac529bba7516e385 Mon Sep 17 00:00:00 2001 From: idelcano Date: Fri, 13 Mar 2026 08:36:41 +0100 Subject: [PATCH 13/16] fix tag --- .github/workflows/master.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 909dda1..0b61722 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -41,7 +41,7 @@ jobs: if: contains(inputs.workflows_to_run, 'run-tests') || contains(inputs.workflows_to_run, 'all') uses: ./.github/workflows/app-test.yml with: - runner: ${{ inputs.runner || (github.event.repository.private && 'self-runner') || 'ubuntu-latest' }} + runner: ${{ inputs.runner || (github.event.repository.private && 'self-hosted') || 'ubuntu-latest' }} #skip if private repo codeql-scan: @@ -50,14 +50,14 @@ jobs: with: languages: ${{ inputs.languages }} build_mode: ${{ inputs.build_mode }} - runner: ${{ inputs.runner || (github.event.repository.private && 'self-runner') || 'ubuntu-latest' }} + runner: ${{ inputs.runner || (github.event.repository.private && 'self-hosted') || 'ubuntu-latest' }} build-check: if: contains(inputs.workflows_to_run, 'build-check') || contains(inputs.workflows_to_run, 'all') uses: ./.github/workflows/bundlemon-build-size.yml with: branch_name: ${{ inputs.bundlemonrc_branch_name }} - runner: ${{ inputs.runner || (github.event.repository.private && 'self-runner') || 'ubuntu-latest' }} + runner: ${{ inputs.runner || (github.event.repository.private && 'self-hosted') || 'ubuntu-latest' }} dependency-track-yarn4: if: github.event_name == 'pull_request' && (contains(inputs.workflows_to_run, 'dependency-track-yarn4') || contains(inputs.workflows_to_run, 'all')) From 76ce22c7a239e434d7e52e058957880398479c94 Mon Sep 17 00:00:00 2001 From: idelcano Date: Fri, 
13 Mar 2026 08:40:47 +0100 Subject: [PATCH 14/16] changed syft latest by 1.42.2 version --- .github/workflows/dependency-track-syft.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-track-syft.yml b/.github/workflows/dependency-track-syft.yml index b5989c0..f586637 100644 --- a/.github/workflows/dependency-track-syft.yml +++ b/.github/workflows/dependency-track-syft.yml @@ -79,7 +79,7 @@ jobs: yarn install # Install syft in the container - curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh \ + curl -sSfL https://raw.githubusercontent.com/anchore/syft/v1.42.2/install.sh \ | sh -s -- -b /usr/local/bin # Generate CycloneDX JSON SBOM From 429e40e98b9fb1446d22402ae1cef917cc8032a3 Mon Sep 17 00:00:00 2001 From: idelcano Date: Fri, 13 Mar 2026 09:20:14 +0100 Subject: [PATCH 15/16] force version --- .github/workflows/dependency-track-syft.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-track-syft.yml b/.github/workflows/dependency-track-syft.yml index f586637..fdcca0c 100644 --- a/.github/workflows/dependency-track-syft.yml +++ b/.github/workflows/dependency-track-syft.yml @@ -80,7 +80,7 @@ jobs: # Install syft in the container curl -sSfL https://raw.githubusercontent.com/anchore/syft/v1.42.2/install.sh \ - | sh -s -- -b /usr/local/bin + | sh -s -- -b /usr/local/bin v1.42.2 # Generate CycloneDX JSON SBOM syft . 
-o cyclonedx-json=bom_syft.json From af4657210a6e1c55f1bc3f0ee952778bd09e2403 Mon Sep 17 00:00:00 2001 From: idelcano Date: Fri, 13 Mar 2026 09:21:14 +0100 Subject: [PATCH 16/16] remove workflow_dispatch in master, this should be config in child repo --- .github/workflows/dependency-track-syft.yml | 5 ----- .github/workflows/dependency-track-yarn4.yml | 5 ----- 2 files changed, 10 deletions(-) diff --git a/.github/workflows/dependency-track-syft.yml b/.github/workflows/dependency-track-syft.yml index fdcca0c..f210a13 100644 --- a/.github/workflows/dependency-track-syft.yml +++ b/.github/workflows/dependency-track-syft.yml @@ -12,11 +12,6 @@ on: DTRACK_API_KEY: required: true workflow_dispatch: - pull_request: - types: - - opened - - reopened - - synchronize concurrency: group: dtrack-${{ github.workflow }}-syft-${{ github.ref }} diff --git a/.github/workflows/dependency-track-yarn4.yml b/.github/workflows/dependency-track-yarn4.yml index 821fcac..52498fe 100644 --- a/.github/workflows/dependency-track-yarn4.yml +++ b/.github/workflows/dependency-track-yarn4.yml @@ -12,11 +12,6 @@ on: DTRACK_API_KEY: required: true workflow_dispatch: - pull_request: - types: - - opened - - reopened - - synchronize concurrency: group: dtrack-${{ github.workflow }}-yarn4-${{ github.ref }}