ci(build-all): MCU-based groups, cache seeders, build infra overhaul (#27050)

Signed-off-by: Ramon Roche <mrpollo@gmail.com>
This commit is contained in:
Ramon Roche 2026-04-10 19:49:53 -06:00 committed by GitHub
parent d52fbd9707
commit c0a45cef70
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 500 additions and 131 deletions

View File

@ -69,6 +69,7 @@ jobs:
runs-on: [runs-on,runner=1cpu-linux-x64,image=ubuntu24-full-x64,"run-id=${{ github.run_id }}",spot=false]
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
seeders: ${{ steps.set-seeders.outputs.seeders }}
timestamp: ${{ steps.set-timestamp.outputs.timestamp }}
branchname: ${{ steps.set-branch.outputs.branchname }}
steps:
@ -82,19 +83,17 @@ jobs:
restore-keys: |
${{ runner.os }}-pip-
- name: Update python packaging to avoid canonicalize_version() error
run: |
pip3 install -U packaging
- name: Install Python Dependencies
uses: py-actions/py-dependency-install@v4
with:
path: "./Tools/setup/requirements.txt"
run: pip3 install -U packaging -r ./Tools/setup/requirements.txt
- id: set-matrix
name: Generate Build Matrix
run: echo "matrix=$(./Tools/ci/generate_board_targets_json.py --group)" >> $GITHUB_OUTPUT
- id: set-seeders
name: Generate Seeder Matrix
run: echo "seeders=$(./Tools/ci/generate_board_targets_json.py --group --seeders)" >> $GITHUB_OUTPUT
- id: set-timestamp
name: Save Current Timestamp
run: echo "timestamp=$(date +"%Y%m%d%H%M%S")" >> $GITHUB_OUTPUT
@ -116,11 +115,52 @@ jobs:
echo "${{ steps.set-branch.outputs.branchname }}"
echo "$(./Tools/ci/generate_board_targets_json.py --group --verbose)"
# ===========================================================================
# CACHE SEEDER JOBS
# ===========================================================================
# Build one representative target per chip family to warm the ccache.
# Matrix jobs fall back to these caches via restore-keys when no
# group-specific cache exists yet. If any seeder fails, the build matrix
# does not start, catching common build errors early.
# ===========================================================================
  # Warm the shared ccache: builds one representative target per chip
  # family (see the banner comment above) before the build matrix starts.
  seed:
    name: Seed [${{ matrix.chip_family }}]
    needs: group_targets
    runs-on: [runs-on,"runner=8cpu-linux-${{ matrix.runner }}","image=ubuntu24-full-${{ matrix.runner }}","run-id=${{ github.run_id }}",spot=false,extras=s3-cache]
    strategy:
      # Seeder matrix is produced by generate_board_targets_json.py --group --seeders
      matrix: ${{ fromJson(needs.group_targets.outputs.seeders) }}
      fail-fast: false
    container:
      image: ${{ matrix.container }}
      credentials:
        username: ${{ github.actor }}
        password: ${{ secrets.GITHUB_TOKEN }}
    steps:
      - uses: runs-on/action@v2
      - uses: actions/checkout@v6
        with:
          fetch-depth: 1  # shallow clone; build does not need history
      - name: Configure Git Safe Directory
        # NOTE(review): presumably needed because the container user differs
        # from the checkout owner -- confirm against the container image.
        run: git config --system --add safe.directory '*'
      - uses: ./.github/actions/setup-ccache
        id: ccache
        with:
          # Matrix jobs restore from this prefix via their restore-keys
          cache-key-prefix: ccache-${{ matrix.chip_family }}-${{ matrix.runner }}-seeder
          max-size: 400M
      - name: Build seed target
        run: make ${{ matrix.target }}
      - uses: ./.github/actions/save-ccache
        if: always()  # save the warmed cache even when the build step fails
        with:
          cache-primary-key: ${{ steps.ccache.outputs.cache-primary-key }}
setup:
name: Build [${{ matrix.runner }}][${{ matrix.group }}]
# runs-on: ubuntu-latest
runs-on: [runs-on,"runner=8cpu-linux-${{ matrix.runner }}","image=ubuntu24-full-${{ matrix.runner }}","run-id=${{ github.run_id }}",spot=false]
needs: group_targets
runs-on: [runs-on,"runner=4cpu-linux-${{ matrix.runner }}","image=ubuntu24-full-${{ matrix.runner }}","run-id=${{ github.run_id }}",spot=false,extras=s3-cache]
needs: [group_targets, seed]
if: "!failure() && !cancelled()"
strategy:
matrix: ${{ fromJson(needs.group_targets.outputs.matrix) }}
fail-fast: false
@ -137,34 +177,29 @@ jobs:
- name: Configure Git Safe Directory
run: git config --system --add safe.directory '*'
# ccache key breakdown:
# ccache-<system os>-<system arch>-<builder group>-
# ccache-<linux>-<arm64>-<aarch64-0>-
# ccache-<linux>-<x64>-<nuttx-0>-
- name: Cache Restore from Key
id: cc_restore
- name: Cache - Restore ccache
id: ccache-restore
uses: actions/cache/restore@v5
with:
path: ~/.ccache
key: ${{ format('ccache-{0}-{1}-{2}', runner.os, matrix.runner, matrix.group) }}
key: ccache-${{ matrix.chip_family }}-${{ matrix.runner }}-${{ matrix.group }}-${{ github.ref_name }}-${{ github.sha }}
restore-keys: |
ccache-${{ runner.os }}-${{ matrix.runner }}-${{ matrix.group }}-
ccache-${{ runner.os }}-${{ matrix.runner }}-
ccache-${{ runner.os }}-${{ matrix.runner }}-
ccache-${{ runner.os }}-
ccache-
ccache-${{ matrix.chip_family }}-${{ matrix.runner }}-${{ matrix.group }}-${{ github.ref_name }}-
ccache-${{ matrix.chip_family }}-${{ matrix.runner }}-${{ matrix.group }}-${{ github.base_ref || 'main' }}-
ccache-${{ matrix.chip_family }}-${{ matrix.runner }}-${{ matrix.group }}-
ccache-${{ matrix.chip_family }}-${{ matrix.runner }}-
- name: Cache Config and Stats
- name: Cache - Configure ccache
run: |
mkdir -p ~/.ccache
echo "base_dir = ${GITHUB_WORKSPACE}" > ~/.ccache/ccache.conf
echo "compression = true" >> ~/.ccache/ccache.conf
echo "compression_level = 6" >> ~/.ccache/ccache.conf
echo "max_size = 120M" >> ~/.ccache/ccache.conf
echo "hash_dir = false" >> ~/.ccache/ccache.conf
echo "compiler_check = content" >> ~/.ccache/ccache.conf
ccache -s
ccache -z
mkdir -p ~/.ccache
echo "base_dir = ${GITHUB_WORKSPACE}" > ~/.ccache/ccache.conf
echo "compression = true" >> ~/.ccache/ccache.conf
echo "compression_level = 6" >> ~/.ccache/ccache.conf
echo "max_size = ${{ matrix.cache_size }}" >> ~/.ccache/ccache.conf
echo "hash_dir = false" >> ~/.ccache/ccache.conf
echo "compiler_check = content" >> ~/.ccache/ccache.conf
ccache -s
ccache -z
- name: Building Artifacts for [${{ matrix.targets }}]
run: |
@ -180,18 +215,10 @@ jobs:
name: px4_${{matrix.group}}_build_artifacts
path: artifacts/
- name: Cache Post Build Stats
if: always()
run: |
ccache -s
ccache -z
- name: Cache Save
if: always()
uses: actions/cache/save@v5
- uses: ./.github/actions/save-ccache
if: success()
with:
path: ~/.ccache
key: ${{ steps.cc_restore.outputs.cache-primary-key }}
cache-primary-key: ${{ steps.ccache-restore.outputs.cache-primary-key }}
# ===========================================================================
# ARTIFACT UPLOAD JOB

View File

@ -0,0 +1,71 @@
# Build All Targets CI Configuration
#
# Controls board grouping, cache sizes, runner specs, and seeder targets
# for the build_all_targets workflow. Forks can customize this file to
# adjust for their infrastructure (e.g., lower cache sizes for GitHub's
# 10GB cache limit, fewer CPU cores for smaller runners).
#
# Consumed by Tools/ci/generate_board_targets_json.py.

# Container images (read as containers.default / containers.voxl2)
containers:
  default: "ghcr.io/px4/px4-dev:v1.17.0-rc2"
  voxl2: "ghcr.io/px4/px4-dev-voxl2:v1.7"

# Runner specs
# NOTE(review): the workflow currently hard-codes 8cpu (seed job) and
# 4cpu (matrix job) in its runs-on lines -- confirm these keys are
# actually consumed before relying on them.
runners:
  seeder_cpu: 8
  matrix_cpu: 4

# Default ccache max-size for build groups
cache:
  default_size: "400M"
  # Per-chip overrides for groups with many diverse boards
  chip_sizes:
    stm32h7: "800M"
    stm32f4: "800M"
    stm32f7: "800M"
    imxrt: "800M"

# Board grouping limits
grouping:
  # Max targets per group, tuned for ~10 min wall-clock with warm cache
  chip_split_limits:
    stm32h7: 10
    stm32f7: 12
    stm32f4: 20
    stm32f1: 39
    imxrt: 12
    kinetis: 14
    s32k: 17
    rp2040: 10
    special: 10
    native: 17
  default_split_limit: 12
  # Minimum targets for a manufacturer to get a named group
  lower_limit: 3
  # If last chunk has fewer targets than this, merge into previous chunk
  merge_back_threshold: 5

# Labels that isolate builds into the "special" group
# (read at the top level of this file by the generator script)
special_labels:
  - lto
  - protected

# NXP chip families are pooled under "nxp-{chip}" regardless of board directory
nxp_chip_families:
  - imxrt
  - kinetis
  - s32k

# Seeder targets: one representative build per chip family.
# NOTE(review): chip families missing from this map fall back to the
# stm32h7 entry in the generator -- keep this list in sync with the
# families emitted by detect_chip_family().
seeders:
  stm32h7: "px4_fmu-v6x_default"
  stm32f7: "px4_fmu-v5_default"
  stm32f4: "px4_fmu-v4_default"
  stm32f1: "px4_io-v2_default"
  imxrt: "nxp_mr-tropic_default"
  kinetis: "nxp_fmuk66-v3_default"
  s32k: "nxp_mr-canhubk3_default"
  rp2040: "raspberrypi_pico_default"
  special: "px4_fmu-v6x_default"
  native: "px4_sitl_default"
  voxl2: "modalai_voxl2_default"

View File

@ -3,7 +3,6 @@
# Please only modify if you know what you are doing
set -e
echo "### :clock1: Build Times" >> $GITHUB_STEP_SUMMARY
targets=$1
for target in ${targets//,/ }
do
@ -14,6 +13,5 @@ do
diff=$(($stop-$start))
build_time="$(($diff /60/60))h $(($diff /60))m $(($diff % 60))s elapsed"
echo -e "\033[0;32mBuild Time: [$build_time]"
echo "* **$target** - $build_time" >> $GITHUB_STEP_SUMMARY
echo "::endgroup::"
done

View File

@ -16,6 +16,7 @@ kconf.warn_assign_override = False
kconf.warn_assign_redun = False
source_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
boards_dir = os.path.join(source_dir, '..', 'boards')
parser = argparse.ArgumentParser(description='Generate build targets')
@ -26,6 +27,8 @@ parser.add_argument('-p', '--pretty', dest='pretty', action='store_true',
parser.add_argument('-g', '--groups', dest='group', action='store_true',
help='Groups targets')
parser.add_argument('-f', '--filter', dest='filter', help='comma separated list of build target name prefixes to include instead of all e.g. "px4_fmu-v5_"')
parser.add_argument('-s', '--seeders', dest='seeders', action='store_true',
help='Output seeder matrix JSON (one entry per chip family)')
args = parser.parse_args()
verbose = args.verbose
@ -35,8 +38,14 @@ if args.filter:
for target in args.filter.split(','):
target_filter.append(target)
default_container = 'ghcr.io/px4/px4-dev:v1.16.0-rc1-258-g0369abd556'
voxl2_container = 'ghcr.io/px4/px4-dev-voxl2:v1.5'
# Load CI configuration from YAML
import yaml
ci_config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'build_all_config.yml')
with open(ci_config_path) as f:
ci_config = yaml.safe_load(f)
default_container = ci_config['containers']['default']
voxl2_container = ci_config['containers']['voxl2']
build_configs = []
grouped_targets = {}
excluded_boards = ['px4_ros2', 'espressif_esp32'] # TODO: fix and enable
@ -56,6 +65,71 @@ excluded_labels = [
'uavcanv1', # TODO: fix and enable
]
# Labels that mark isolated/special builds (poor cache reuse with normal builds)
special_labels = ci_config.get('special_labels', ['lto', 'protected'])
def detect_chip_family(manufacturer_name, board_name, label) -> str:
    """Detect the chip family for a board by reading its NuttX defconfig.

    Args:
        manufacturer_name: directory name under boards/ (e.g. 'px4').
        board_name: board directory name under the manufacturer.
        label: build label of the target; labels listed in the module-level
            special_labels force the 'special' family regardless of chip.

    Returns a chip family string used for cache grouping:
    stm32h7, stm32f7, stm32f4, stm32f1, imxrt, kinetis, s32k, rp2040, native, special
    """
    # Special labels get their own group regardless of chip
    if label in special_labels:
        return 'special'

    board_path = os.path.join(boards_dir, manufacturer_name, board_name)
    nsh_defconfig = os.path.join(board_path, 'nuttx-config', 'nsh', 'defconfig')
    if not os.path.exists(nsh_defconfig):
        # Try bootloader defconfig as fallback
        bl_defconfig = os.path.join(board_path, 'nuttx-config', 'bootloader', 'defconfig')
        if os.path.exists(bl_defconfig):
            nsh_defconfig = bl_defconfig
        else:
            # No NuttX defconfig at all -> grouped with native builds
            return 'native'

    arch_chip = None
    specific_chip = None
    with open(nsh_defconfig) as f:
        for line in f:
            line = line.strip()
            if line.startswith('CONFIG_ARCH_CHIP='):
                # e.g. CONFIG_ARCH_CHIP="stm32h7" -> stm32h7
                arch_chip = line.split('=')[1].strip('"')
            elif line.startswith('CONFIG_ARCH_CHIP_STM32F') and line.endswith('=y'):
                # Only STM32F* sub-family defines are captured; used below to
                # disambiguate boards whose CONFIG_ARCH_CHIP is just "stm32".
                specific_chip = line.split('=')[0].replace('CONFIG_ARCH_CHIP_', '')

    if arch_chip is None:
        return 'native'

    # Direct matches for chips that have unique CONFIG_ARCH_CHIP values
    if arch_chip == 'stm32h7':
        return 'stm32h7'
    elif arch_chip == 'stm32f7':
        return 'stm32f7'
    elif arch_chip == 'imxrt':
        return 'imxrt'
    elif arch_chip == 'kinetis':
        return 'kinetis'
    elif arch_chip.startswith('s32k'):
        return 's32k'
    elif arch_chip == 'rp2040':
        return 'rp2040'
    elif arch_chip == 'stm32':
        # Disambiguate STM32 sub-families using specific chip define
        if specific_chip:
            if specific_chip.startswith('STM32F1'):
                return 'stm32f1'
            elif specific_chip.startswith('STM32F4'):
                return 'stm32f4'
            else:
                return 'stm32f4'  # Default STM32 to F4
        return 'stm32f4'
    else:
        # Unrecognized NuttX chip -> fall back to the native pool
        return 'native'
target_chip_families = {} # target_name -> chip_family mapping
github_action_config = { 'include': build_configs }
extra_args = {}
if args.pretty:
@ -66,11 +140,21 @@ def chunks(arr, size):
for i in range(0, len(arr), size):
yield arr[i:i + size]
# Minimum size of the final chunk; smaller tails are folded into the
# previous chunk. Read from grouping.merge_back_threshold in
# build_all_config.yml so forks can tune it (it was previously
# hard-coded to 5, which left that config knob unused).
MERGE_BACK_THRESHOLD = ci_config.get('grouping', {}).get('merge_back_threshold', 5)


def chunks_merged(arr, size):
    """Split ``arr`` into chunks of at most ``size`` items.

    Returns a list of lists. If the final chunk would hold fewer than
    MERGE_BACK_THRESHOLD items (and more than one chunk exists), it is
    merged into the previous chunk so no build group is pathologically
    small.
    """
    result = list(chunks(arr, size))
    if len(result) > 1 and len(result[-1]) < MERGE_BACK_THRESHOLD:
        result[-2] = result[-2] + result[-1]
        result.pop()
    return result
def comma_targets(targets):
# turns array of targets into a comma split string
return ",".join(targets)
def process_target(px4board_file, target_name):
def process_target(px4board_file, target_name, manufacturer_name=None, board_dir_name=None, label=None):
# reads through the board file and grabs
# useful information for building
ret = None
@ -107,6 +191,16 @@ def process_target(px4board_file, target_name):
if board_name in board_container_overrides:
container = board_container_overrides[board_name]
# Detect chip family for cache grouping
chip_family = 'native'
if manufacturer_name and board_dir_name:
if platform == 'nuttx':
chip_family = detect_chip_family(manufacturer_name, board_dir_name, label or '')
elif board_name in board_container_overrides or platform in platform_container_overrides:
chip_family = 'native' # voxl2/qurt targets
else:
chip_family = 'native'
# Boards with container overrides get their own group
if board_name in board_container_overrides or platform in platform_container_overrides:
group = 'voxl2'
@ -124,7 +218,7 @@ def process_target(px4board_file, target_name):
else:
if verbose: print(f'unmatched platform: {platform}')
ret = {'target': target_name, 'container': container}
ret = {'target': target_name, 'container': container, 'chip_family': chip_family}
if(args.group):
ret['arch'] = group
@ -147,6 +241,8 @@ grouped_targets['base']['container'] = default_container
grouped_targets['base']['manufacturers'] = {}
grouped_targets['base']['manufacturers']['px4'] = []
grouped_targets['base']['manufacturers']['px4'] += metadata_targets
for mt in metadata_targets:
target_chip_families[mt] = 'native'
for manufacturer in sorted(os.scandir(os.path.join(source_dir, '../boards')), key=lambda e: e.name):
if not manufacturer.is_dir():
@ -177,7 +273,10 @@ for manufacturer in sorted(os.scandir(os.path.join(source_dir, '../boards')), ke
if label in excluded_labels:
if verbose: print(f'excluding label {label} ({target_name})')
continue
target = process_target(files.path, target_name)
target = process_target(files.path, target_name,
manufacturer_name=manufacturer.name,
board_dir_name=board.name,
label=label)
if (args.group and target is not None):
if (target['arch'] not in grouped_targets):
grouped_targets[target['arch']] = {}
@ -186,6 +285,7 @@ for manufacturer in sorted(os.scandir(os.path.join(source_dir, '../boards')), ke
if(manufacturer.name not in grouped_targets[target['arch']]['manufacturers']):
grouped_targets[target['arch']]['manufacturers'][manufacturer.name] = []
grouped_targets[target['arch']]['manufacturers'][manufacturer.name].append(target_name)
target_chip_families[target_name] = target['chip_family']
if target is not None:
build_configs.append(target)
@ -246,6 +346,9 @@ for manufacturer in sorted(os.scandir(os.path.join(source_dir, '../boards')), ke
if manufacturer.name not in grouped_targets[group]['manufacturers']:
grouped_targets[group]['manufacturers'][manufacturer.name] = []
grouped_targets[group]['manufacturers'][manufacturer.name].append(deb_target)
# Inherit chip_family from the default target
default_chip = target_chip_families.get(default_target, 'native')
target_chip_families[deb_target] = default_chip
build_configs.append(target_entry)
if(verbose):
@ -261,109 +364,227 @@ if(verbose):
print("===================")
if (args.group):
# if we are using this script for grouping builds
# we loop through the manufacturers list and split their targets
# if a manufacturer has more than a LIMIT of boards then we split that
# into sub groups such as "arch-manufacturer name-index"
# example:
# nuttx-px4-0
# nuttx-px4-1
# nuttx-px4-2
# nuttx-ark-0
# nuttx-ark-1
# if the manufacturer doesn't have more targets than LIMIT then we add
# them to a generic group with the following structure "arch-index"
# example:
# nuttx-0
# nuttx-1
# Group targets by chip family for better ccache reuse.
# Targets sharing the same MCU family (e.g. stm32h7) benefit from
# a shared ccache seed since they compile the same NuttX kernel and HAL.
#
# Grouping strategy:
# 1. Collect all targets per (arch, chip_family, manufacturer)
# 2. Within each chip_family, large manufacturers get their own groups
# named "{manufacturer}-{chip_family}[-N]"
# 3. Small manufacturers are merged into "misc-{chip_family}[-N]"
# 4. Special groups: "special" (lto/protected/allyes), "io" (stm32f1),
# "voxl2-0" (unchanged)
# 5. Non-NuttX groups: "base-N", "aarch64-N", "armhf-N" (unchanged)
final_groups = []
last_man = ''
last_arch = ''
SPLIT_LIMIT = 10
LOWER_LIMIT = 5
# Load grouping and cache config
grouping_config = ci_config.get('grouping', {})
CHIP_SPLIT_LIMITS = grouping_config.get('chip_split_limits', {})
DEFAULT_SPLIT_LIMIT = grouping_config.get('default_split_limit', 12)
LOWER_LIMIT = grouping_config.get('lower_limit', 3)
cache_config = ci_config.get('cache', {})
DEFAULT_CACHE_SIZE = cache_config.get('default_size', '400M')
CHIP_CACHE_SIZES = cache_config.get('chip_sizes', {})
if(verbose):
print(f'=:Architectures: [{grouped_targets.keys()}]')
for arch in grouped_targets:
runner = 'x64' if arch in ('nuttx', 'voxl2') else 'arm64'
runner = 'x64'
# armhf and aarch64 Linux boards need the arm64 container image
# which ships the arm-linux-gnueabihf and aarch64-linux-gnu cross compilers
# (the x64 container image does not include them)
if arch in ('armhf', 'aarch64'):
runner = 'arm64'
if(verbose):
print(f'=:Processing: [{arch}]')
temp_group = []
for man in grouped_targets[arch]['manufacturers']:
if(verbose):
print(f'=:Processing: [{arch}][{man}]')
man_len = len(grouped_targets[arch]['manufacturers'][man])
if(man_len > LOWER_LIMIT and man_len < (SPLIT_LIMIT + 1)):
# Manufacturers can have their own group
if arch == 'nuttx':
# Re-bucket NuttX targets by chip_family then manufacturer
chip_man_buckets = {} # (chip_family, manufacturer) -> [target_names]
for man in grouped_targets[arch]['manufacturers']:
for target in grouped_targets[arch]['manufacturers'][man]:
chip = target_chip_families.get(target, 'native')
key = (chip, man)
if key not in chip_man_buckets:
chip_man_buckets[key] = []
chip_man_buckets[key].append(target)
# Collect all chip families present
chip_families_seen = sorted(set(k[0] for k in chip_man_buckets.keys()))
for chip in chip_families_seen:
SPLIT_LIMIT = CHIP_SPLIT_LIMITS.get(chip, DEFAULT_SPLIT_LIMIT)
# Special naming for certain chip families
if chip == 'special':
chip_label = 'special'
elif chip == 'stm32f1':
chip_label = 'io'
elif chip == 'rp2040':
chip_label = 'special' # rp2040 goes into special group
else:
chip_label = chip
# Gather all (manufacturer -> targets) for this chip family
# NXP chip families (imxrt, kinetis, s32k) pool all manufacturers
# under "nxp" since all boards use NXP silicon regardless of
# which directory they live in (e.g., px4/fmu-v6xrt is imxrt).
nxp_chips = tuple(ci_config.get('nxp_chip_families', ['imxrt', 'kinetis', 's32k']))
man_targets = {}
for (c, m), targets in chip_man_buckets.items():
if c == chip:
man_key = 'nxp' if chip in nxp_chips else m
if man_key not in man_targets:
man_targets[man_key] = []
man_targets[man_key].extend(targets)
# Merge rp2040 targets into a flat list for the special group
if chip in ('special', 'rp2040'):
all_targets = []
for m in sorted(man_targets.keys()):
all_targets.extend(man_targets[m])
# These get added to the special bucket below
# We'll handle after the chip loop
continue
if(verbose):
print(f'=:Processing: [{arch}][{man}][{man_len}]==Manufacturers can have their own group')
group_name = arch + "-" + man
targets = comma_targets(grouped_targets[arch]['manufacturers'][man])
final_groups.append({
"container": grouped_targets[arch]['container'],
"targets": targets,
"arch": arch,
"runner": runner,
"group": group_name,
"len": len(grouped_targets[arch]['manufacturers'][man])
})
elif(man_len >= (SPLIT_LIMIT + 1)):
# Split big man groups into subgroups
# example: Pixhawk
if(verbose):
print(f'=:Processing: [{arch}][{man}][{man_len}]==Manufacturers has multiple own groups')
chunk_limit = SPLIT_LIMIT
print(f'=:Processing chip_family: [{chip}] ({chip_label})')
# Split into large-manufacturer groups and misc groups
# For NXP-exclusive chip families, always use the nxp name
# regardless of target count (there's no other manufacturer to pool with)
force_named = chip in nxp_chips
temp_group = [] # small manufacturers pooled here
for man in sorted(man_targets.keys()):
man_len = len(man_targets[man])
if (force_named or man_len > LOWER_LIMIT) and man_len <= SPLIT_LIMIT:
group_name = f"{man}-{chip_label}"
if(verbose):
print(f'=: [{man}][{man_len}] -> {group_name}')
final_groups.append({
"container": grouped_targets[arch]['container'],
"targets": comma_targets(man_targets[man]),
"arch": arch,
"chip_family": chip,
"runner": runner,
"group": group_name,
"len": man_len,
})
elif man_len > SPLIT_LIMIT:
chunk_counter = 0
for chunk in chunks_merged(man_targets[man], SPLIT_LIMIT):
group_name = f"{man}-{chip_label}-{chunk_counter}"
if(verbose):
print(f'=: [{man}][{man_len}] -> {group_name} ({len(chunk)})')
final_groups.append({
"container": grouped_targets[arch]['container'],
"targets": comma_targets(chunk),
"arch": arch,
"chip_family": chip,
"runner": runner,
"group": group_name,
"len": len(chunk),
})
chunk_counter += 1
else:
if(verbose):
print(f'=: [{man}][{man_len}] -> misc pool')
temp_group.extend(man_targets[man])
# Emit misc groups for small manufacturers
if temp_group:
misc_chunks = chunks_merged(temp_group, SPLIT_LIMIT)
num_misc_chunks = len(misc_chunks)
chunk_counter = 0
for chunk in misc_chunks:
if num_misc_chunks == 1:
group_name = f"misc-{chip_label}"
else:
group_name = f"misc-{chip_label}-{chunk_counter}"
if(verbose):
print(f'=: [misc][{len(chunk)}] -> {group_name}')
final_groups.append({
"container": grouped_targets[arch]['container'],
"targets": comma_targets(chunk),
"arch": arch,
"chip_family": chip,
"runner": runner,
"group": group_name,
"len": len(chunk),
})
chunk_counter += 1
# Now handle special + rp2040 targets
SPLIT_LIMIT = CHIP_SPLIT_LIMITS.get('special', DEFAULT_SPLIT_LIMIT)
special_targets = []
for (c, m), targets in chip_man_buckets.items():
if c in ('special', 'rp2040'):
special_targets.extend(targets)
if special_targets:
chunk_counter = 0
for chunk in chunks(grouped_targets[arch]['manufacturers'][man], chunk_limit):
group_name = arch + "-" + man + "-" + str(chunk_counter)
targets = comma_targets(chunk)
for chunk in chunks_merged(special_targets, SPLIT_LIMIT):
if len(special_targets) <= SPLIT_LIMIT:
group_name = 'special'
else:
group_name = f'special-{chunk_counter}'
if(verbose):
print(f'=: [special][{len(chunk)}] -> {group_name}')
final_groups.append({
"container": grouped_targets[arch]['container'],
"targets": targets,
"targets": comma_targets(chunk),
"arch": arch,
"chip_family": "special",
"runner": runner,
"group": group_name,
"len": len(chunk),
})
chunk_counter += 1
else:
if(verbose):
print(f'=:Processing: [{arch}][{man}][{man_len}]==Manufacturers too small group with others')
temp_group.extend(grouped_targets[arch]['manufacturers'][man])
temp_len = len(temp_group)
chunk_counter = 0
if(temp_len > 0 and temp_len < (SPLIT_LIMIT + 1)):
if(verbose):
print(f'=:Processing: [{arch}][orphan][{temp_len}]==Leftover arch can have their own group')
group_name = arch + "-" + str(chunk_counter)
targets = comma_targets(temp_group)
final_groups.append({
"container": grouped_targets[arch]['container'],
"targets": targets,
"arch": arch,
"runner": runner,
"group": group_name,
"len": temp_len
})
elif(temp_len >= (SPLIT_LIMIT + 1)):
# Split big man groups into subgroups
# example: Pixhawk
if(verbose):
print(f'=:Processing: [{arch}][orphan][{temp_len}]==Leftover arch can has multpile group')
chunk_limit = SPLIT_LIMIT
chunk_counter = 0
for chunk in chunks(temp_group, chunk_limit):
group_name = arch + "-" + str(chunk_counter)
targets = comma_targets(chunk)
elif arch == 'voxl2':
# VOXL2 stays as its own group
all_targets = []
for man in grouped_targets[arch]['manufacturers']:
all_targets.extend(grouped_targets[arch]['manufacturers'][man])
if all_targets:
final_groups.append({
"container": grouped_targets[arch]['container'],
"targets": targets,
"targets": comma_targets(all_targets),
"arch": arch,
"chip_family": "native",
"runner": runner,
"group": group_name,
"len": len(chunk),
"group": "voxl2-0",
"len": len(all_targets),
})
chunk_counter += 1
else:
# Non-NuttX groups (base, aarch64, armhf) - keep simple grouping
SPLIT_LIMIT = CHIP_SPLIT_LIMITS.get('native', DEFAULT_SPLIT_LIMIT)
all_targets = []
for man in grouped_targets[arch]['manufacturers']:
all_targets.extend(grouped_targets[arch]['manufacturers'][man])
if all_targets:
chunk_counter = 0
for chunk in chunks_merged(all_targets, SPLIT_LIMIT):
if len(all_targets) <= SPLIT_LIMIT:
group_name = f"{arch}-0"
else:
group_name = f"{arch}-{chunk_counter}"
final_groups.append({
"container": grouped_targets[arch]['container'],
"targets": comma_targets(chunk),
"arch": arch,
"chip_family": "native",
"runner": runner,
"group": group_name,
"len": len(chunk),
})
chunk_counter += 1
# Add cache_size to each group based on chip family
for g in final_groups:
g['cache_size'] = CHIP_CACHE_SIZES.get(g['chip_family'], DEFAULT_CACHE_SIZE)
if(verbose):
import pprint
print("================")
@ -375,6 +596,58 @@ if (args.group):
print("= JSON output =")
print("===============")
print(json.dumps({ "include": final_groups }, **extra_args))
if args.seeders:
# Generate one seeder entry per chip family present in the groups.
# Each seeder builds a representative target to warm the ccache for
# all groups sharing that chip family.
seeder_targets = ci_config.get('seeders', {})
seeder_containers = {
'native': default_container,
}
# Determine which chip families actually have groups
active_families = set()
for g in final_groups:
cf = g['chip_family']
active_families.add(cf)
# voxl2 gets its own seeder with a different container
if g['group'].startswith('voxl2'):
active_families.add('voxl2')
seeders = []
for cf in sorted(active_families):
if cf == 'special':
continue # special group seeds from stm32h7
if cf == 'voxl2':
seeders.append({
'chip_family': 'voxl2',
'target': 'modalai_voxl2_default',
'container': voxl2_container,
'runner': 'x64',
})
elif cf == 'native':
# One seeder per runner arch that has native groups (exclude voxl2
# which has its own seeder with a different container)
native_runners = set()
for g in final_groups:
if g['chip_family'] == 'native' and not g['group'].startswith('voxl2'):
native_runners.add(g['runner'])
for r in sorted(native_runners):
seeders.append({
'chip_family': 'native',
'target': seeder_targets['native'],
'container': default_container,
'runner': r,
})
else:
seeders.append({
'chip_family': cf,
'target': seeder_targets.get(cf, seeder_targets['stm32h7']),
'container': seeder_containers.get(cf, default_container),
'runner': 'x64',
})
print(json.dumps({ "include": seeders }, **extra_args))
else:
print(json.dumps({ "include": final_groups }, **extra_args))
else:
print(json.dumps(github_action_config, **extra_args))