Compare commits
1 commit: bead/nixos...bead/nixos

| Author | SHA1 | Date |
|---|---|---|
| | 352c89756f | |
@@ -16,5 +16,3 @@ jobs:
 
       - name: Check flake
         run: nix flake check
-        env:
-          NIX_CONFIG: "access-tokens = git.johnogle.info=${{ secrets.GITEA_ACCESS_TOKEN }}"
@@ -4,7 +4,6 @@ with lib;
 
 let
   cfg = config.home.roles.communication;
-  isLinux = pkgs.stdenv.isLinux;
 in
 {
   options.home.roles.communication = {
@@ -13,14 +12,14 @@ in
 
   config = mkIf cfg.enable {
     home.packages = [
-      # For logging back into google chat (cross-platform)
-      globalInputs.google-cookie-retrieval.packages.${system}.default
-    ] ++ optionals isLinux [
-      # Linux-only communication apps (Electron apps don't build on Darwin)
+      # Communication apps
       pkgs.element-desktop
       # Re-enabled in 25.11 after security issues were resolved
       pkgs.fluffychat
       pkgs.nextcloud-talk-desktop
 
+      # For logging back into google chat
+      globalInputs.google-cookie-retrieval.packages.${system}.default
     ];
   };
 }
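Aside: the hunk above drops the `] ++ optionals isLinux [` split. For readers unfamiliar with that helper, here is a minimal, evaluable sketch of how `lib.optionals` composes such a package list. The string entries and the hard-coded `isLinux` flag are placeholders for illustration, not the module's real values, and it assumes `<nixpkgs>` is on `NIX_PATH`:

```nix
# Sketch of the `++ optionals isLinux [ ... ]` pattern removed above.
# Strings stand in for package attributes; isLinux is hard-coded here.
let
  lib = import <nixpkgs/lib>;
  isLinux = false;
  crossPlatform = [ "google-cookie-retrieval" ];
  linuxOnly = [ "element-desktop" "fluffychat" "nextcloud-talk-desktop" ];
in
  crossPlatform ++ lib.optionals isLinux linuxOnly
```

Evaluated with `nix-instantiate --eval --strict`, this yields only the cross-platform list when `isLinux` is false and the concatenated list when it is true.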
@@ -4,7 +4,6 @@ with lib;
 
 let
   cfg = config.home.roles.desktop;
-  isLinux = pkgs.stdenv.isLinux;
 in
 {
   options.home.roles.desktop = {
@@ -13,29 +12,27 @@ in
 
   config = mkIf cfg.enable {
     home.packages = with pkgs; [
-      # Cross-platform desktop applications
+      # Desktop applications
       bitwarden-desktop
-      keepassxc
-      xdg-utils # XDG utilities for opening files/URLs with default applications
-    ] ++ optionals isLinux [
-      # Linux-only desktop applications
       dunst
+      keepassxc
       unstable.ghostty
 
-      # Linux-only desktop utilities
+      # Desktop utilities
       feh # Image viewer and wallpaper setter for X11
       rofi # Application launcher for X11
       solaar # Logitech management software
       waybar
       wofi # Application launcher for Wayland
+      xdg-utils # XDG utilities for opening files/URLs with default applications
 
-      # Linux-only system utilities with GUI components
+      # System utilities with GUI components
       (snapcast.override { pulseaudioSupport = true; })
 
-      # KDE tiling window management (Linux-only)
+      # KDE tiling window management
       kdePackages.krohnkite # Dynamic tiling extension for KWin 6
 
-      # KDE PIM applications for email, calendar, and contacts (Linux-only)
+      # KDE PIM applications for email, calendar, and contacts
       kdePackages.kmail
       kdePackages.kmail-account-wizard
       kdePackages.kmailtransport
@@ -43,33 +40,33 @@ in
       kdePackages.kaddressbook
       kdePackages.kontact
 
-      # KDE System components needed for proper integration (Linux-only)
+      # KDE System components needed for proper integration
       kdePackages.kded
       kdePackages.systemsettings
       kdePackages.kmenuedit
 
-      # Desktop menu support (Linux-only)
+      # Desktop menu support
       kdePackages.plasma-desktop # Contains applications.menu
 
-      # KDE Online Accounts support (Linux-only)
+      # KDE Online Accounts support
       kdePackages.kaccounts-integration
       kdePackages.kaccounts-providers
       kdePackages.signond
 
-      # KDE Mapping (Linux-only)
+      # KDE Mapping
       kdePackages.marble # Virtual globe and world atlas
 
-      # KDE Productivity (Linux-only)
+      # KDE Productivity
       kdePackages.kate # Advanced text editor with syntax highlighting
       kdePackages.okular # Universal document viewer (PDF, ePub, etc.)
       kdePackages.spectacle # Screenshot capture utility
       kdePackages.filelight # Visual disk usage analyzer
 
-      # KDE Multimedia (Linux-only)
+      # KDE Multimedia
       kdePackages.gwenview # Image viewer and basic editor
       kdePackages.elisa # Music player
 
-      # KDE System Utilities (Linux-only)
+      # KDE System Utilities
       kdePackages.ark # Archive manager (zip, tar, 7z, etc.)
       kdePackages.yakuake # Drop-down terminal emulator
     ];
@@ -80,56 +77,51 @@ in
 
     programs.spotify-player.enable = true;
 
-    # Linux-only: GNOME keyring service
-    services.gnome-keyring = mkIf isLinux {
+    services.gnome-keyring = {
      enable = true;
    };
 
-    # Linux-only: systemd user services for rbw vault unlock
-    systemd.user.services = mkIf isLinux {
-      # rbw vault unlock on login
-      rbw-unlock-on-login = {
-        Unit = {
-          Description = "Unlock rbw vault at login";
-          After = [ "graphical-session.target" ];
-        };
-        Service = {
-          Type = "oneshot";
-          ExecStart = "${pkgs.rbw}/bin/rbw unlock";
-          Environment = "RBW_AGENT=${pkgs.rbw}/bin/rbw-agent";
-          # KillMode = "process" prevents systemd from killing the rbw-agent daemon
-          # when this oneshot service completes. The agent is spawned by rbw unlock
-          # and needs to persist after the service exits.
-          KillMode = "process";
-        };
-        Install = {
-          WantedBy = [ "graphical-session.target" ];
+    # rbw vault unlock on login and resume from suspend
+    systemd.user.services.rbw-unlock-on-login = {
+      Unit = {
+        Description = "Unlock rbw vault at login";
+        After = [ "graphical-session.target" ];
      };
-      # rbw vault unlock on resume from suspend
-      rbw-unlock-on-resume = {
-        Unit = {
-          Description = "Unlock rbw vault after resume from suspend";
-          After = [ "suspend.target" ];
-        };
-        Service = {
-          Type = "oneshot";
-          ExecStart = "${pkgs.rbw}/bin/rbw unlock";
-          Environment = "RBW_AGENT=${pkgs.rbw}/bin/rbw-agent";
-          # KillMode = "process" prevents systemd from killing the rbw-agent daemon
-          # when this oneshot service completes. The agent is spawned by rbw unlock
-          # and needs to persist after the service exits.
-          KillMode = "process";
-        };
-        Install = {
-          WantedBy = [ "suspend.target" ];
-        };
+      Service = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.rbw}/bin/rbw unlock";
+        Environment = "RBW_AGENT=${pkgs.rbw}/bin/rbw-agent";
+        # KillMode = "process" prevents systemd from killing the rbw-agent daemon
+        # when this oneshot service completes. The agent is spawned by rbw unlock
+        # and needs to persist after the service exits.
+        KillMode = "process";
+      };
+      Install = {
+        WantedBy = [ "graphical-session.target" ];
      };
    };
 
-    # Linux-only: KDE environment variables for proper integration
-    home.sessionVariables = mkIf isLinux {
+    systemd.user.services.rbw-unlock-on-resume = {
+      Unit = {
+        Description = "Unlock rbw vault after resume from suspend";
+        After = [ "suspend.target" ];
+      };
+      Service = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.rbw}/bin/rbw unlock";
+        Environment = "RBW_AGENT=${pkgs.rbw}/bin/rbw-agent";
+        # KillMode = "process" prevents systemd from killing the rbw-agent daemon
+        # when this oneshot service completes. The agent is spawned by rbw unlock
+        # and needs to persist after the service exits.
+        KillMode = "process";
+      };
+      Install = {
+        WantedBy = [ "suspend.target" ];
+      };
+    };
 
+    # KDE environment variables for proper integration
+    home.sessionVariables = {
      QT_QPA_PLATFORMTHEME = "kde";
      KDE_SESSION_VERSION = "6";
    };
@@ -149,14 +141,13 @@ in
        "x-scheme-handler/https" = "firefox.desktop";
      };
      defaultApplications = {
-        # Web browsers (cross-platform)
+        # Web browsers
        "text/html" = "firefox.desktop";
        "x-scheme-handler/http" = "firefox.desktop";
        "x-scheme-handler/https" = "firefox.desktop";
        "x-scheme-handler/about" = "firefox.desktop";
        "x-scheme-handler/unknown" = "firefox.desktop";
-      } // optionalAttrs isLinux {
-        # Linux-only: KDE application associations
        # Documents
        "application/pdf" = "okular.desktop";
        "text/plain" = "kate.desktop";
@@ -199,11 +190,9 @@ in
      };
    };
 
-    # Linux-only: Fix for KDE applications.menu file issue on Plasma 6
+    # Fix for KDE applications.menu file issue on Plasma 6
    # KDE still looks for applications.menu but Plasma 6 renamed it to plasma-applications.menu
-    xdg.configFile."menus/applications.menu" = mkIf isLinux {
-      source = "${pkgs.kdePackages.plasma-workspace}/etc/xdg/menus/plasma-applications.menu";
-    };
+    xdg.configFile."menus/applications.menu".source = "${pkgs.kdePackages.plasma-workspace}/etc/xdg/menus/plasma-applications.menu";
 
    # Note: modules must be imported at top-level home config
  };
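Aside: the `} // optionalAttrs isLinux {` line removed from `defaultApplications` above is the attribute-set analogue of `optionals`. A minimal, evaluable sketch — the two associations are copied from the hunk purely for illustration, and `<nixpkgs>` on `NIX_PATH` is assumed:

```nix
# Sketch of the `// optionalAttrs isLinux { ... }` merge removed above.
let
  lib = import <nixpkgs/lib>;
  isLinux = true;
in
  { "text/html" = "firefox.desktop"; }
  // lib.optionalAttrs isLinux { "application/pdf" = "okular.desktop"; }
```

With `isLinux = true` the result contains both associations; with `false`, only `"text/html"` remains.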
@@ -1,317 +0,0 @@
----
-description: Batch research and planning for multiple beads with interactive question review
-model: opus
----
-
-# Beads Batch Research+Plan
-
-This skill automates the common workflow of:
-1. Running /beads_research in parallel for multiple beads
-2. Presenting open questions interactively for user input (bead-by-bead)
-3. Running /beads_plan for all researched beads (plus any spawned from splits)
-
-## When to Use
-
-- You have multiple beads ready for work
-- You want to research and plan them efficiently before implementation
-- You prefer to batch your question-answering rather than context-switching between skills
-
-## Phase 1: Selection
-
-1. **Get ready beads**: Run `bd ready --limit=20` to list beads with no blockers
-
-2. **Filter already-researched beads**:
-   For each ready bead, check if it already has research:
-   ```bash
-   ls thoughts/beads-{bead-id}/research.md 2>/dev/null
-   ```
-
-   Categorize beads:
-   - **Needs research**: No `research.md` exists
-   - **Has research, needs plan**: `research.md` exists but no `plan.md`
-   - **Already planned**: Both `research.md` and `plan.md` exist
-
-3. **Present selection**:
-   ```
-   Ready beads available for batch research+plan:
-
-   NEEDS RESEARCH:
-   - {bead-id}: {title} (type: {type})
-   - ...
-
-   HAS RESEARCH (plan only):
-   - {bead-id}: {title} (type: {type})
-   - ...
-
-   ALREADY PLANNED (skip):
-   - {bead-id}: {title}
-
-   Which beads would you like to process?
-   ```
-
-4. **Use AskUserQuestion** with `multiSelect: true`:
-   - Include bead ID and title for each option
-   - Separate options by category
-   - Allow selection across categories
-
-## Phase 2: Parallel Research
-
-For each selected bead that NEEDS RESEARCH, launch a research subagent.
-
-### Subagent Instructions Template
-
-```
-Research bead [BEAD_ID]: [BEAD_TITLE]
-
-1. **Load bead context**:
-   ```bash
-   bd show [BEAD_ID]
-   ```
-
-2. **Create artifact directory**:
-   ```bash
-   mkdir -p thoughts/beads-[BEAD_ID]
-   ```
-
-3. **Conduct research** following beads_research.md patterns:
-   - Analyze and decompose the research question
-   - Spawn parallel sub-agent tasks (codebase-locator, codebase-analyzer, etc.)
-   - Synthesize findings
-
-4. **Write research document** to `thoughts/beads-[BEAD_ID]/research.md`:
-   - Include frontmatter with metadata
-   - Document findings with file:line references
-   - **CRITICAL**: Include "## Open Questions" section listing any unresolved items
-
-5. **Return summary**:
-   - Research status (complete/partial)
-   - Number of open questions
-   - Key findings summary (2-3 bullet points)
-   - List of open questions verbatim
-```
-
-### Launching Subagents
-
-Use `subagent_type: "opus"` for research subagents (matches beads_research model setting).
-
-Launch ALL research subagents in a single message for parallel execution:
-```
-<Task calls for each selected bead needing research - all in one message>
-```
-
-### Collecting Results
-
-Wait for ALL research subagents to complete. Collect:
-- Bead ID
-- Research status
-- Open questions list
-- Any errors encountered
-
-## Phase 3: Interactive Question Review
-
-Present each bead's open questions sequentially for user input.
-
-### For Each Bead (in order):
-
-1. **Present research summary**:
-   ```
-   ## Bead {N}/{total}: {bead-id} - {title}
-
-   Research complete. Key findings:
-   - {finding 1}
-   - {finding 2}
-
-   Open questions requiring your input:
-   1. {question 1}
-   2. {question 2}
-
-   Additionally:
-   - Should this bead be split into multiple beads? (y/n)
-   - If split, describe the split:
-   ```
-
-2. **Collect user responses**:
-   - Answers to open questions
-   - Split decision (yes/no)
-   - If split: new bead titles and how to divide the work
-
-3. **Handle splits**:
-   If user indicates a split:
-   ```bash
-   # Create new beads for split work
-   bd create --title="{split title 1}" --type={type} --priority={priority} \
-     --description="{description based on user input}"
-
-   # Update original bead if scope narrowed
-   bd update {original-bead-id} --description="{updated description}"
-   ```
-
-   Track new bead IDs for inclusion in planning phase.
-
-4. **Update research document**:
-   Append user answers to `thoughts/beads-{id}/research.md`:
-   ```markdown
-   ## User Clarifications [{timestamp}]
-
-   Q: {question 1}
-   A: {user answer 1}
-
-   Q: {question 2}
-   A: {user answer 2}
-
-   ## Bead Splits
-   {If split: description of split and new bead IDs}
-   ```
-
-### Progress Tracking
-
-After each bead's questions are answered, confirm before moving to next:
-```
-Questions answered for {bead-id}. {N-1} beads remaining.
-Continue to next bead? (y/n)
-```
-
-### Beads with No Questions
-
-If a bead's research had no open questions:
-```
-## Bead {N}/{total}: {bead-id} - {title}
-
-Research complete with no open questions.
-
-Key findings:
-- {finding 1}
-- {finding 2}
-
-Should this bead be split? (y/n)
-```
-
-## Phase 4: Parallel Planning
-
-After all questions answered, launch planning subagents for all beads.
-
-### Beads to Plan
-
-Include:
-- Original beads that were researched
-- Beads that had existing research (from selection phase)
-- New beads spawned from splits
-
-### Subagent Instructions Template
-
-```
-Create implementation plan for bead [BEAD_ID]: [BEAD_TITLE]
-
-1. **Load context**:
-   ```bash
-   bd show [BEAD_ID]
-   ```
-
-2. **Read research** (it exists and has user clarifications):
-   Read `thoughts/beads-[BEAD_ID]/research.md` FULLY
-
-3. **Create plan** following beads_plan.md patterns:
-   - Context gathering via sub-agents
-   - Design approach based on research findings and user clarifications
-   - **Skip interactive questions** - they were already answered in research review
-
-4. **Write plan** to `thoughts/beads-[BEAD_ID]/plan.md`:
-   - Full plan structure with phases
-   - Success criteria (automated and manual)
-   - References to research document
-
-5. **Update bead**:
-   ```bash
-   bd update [BEAD_ID] --notes="Plan created: thoughts/beads-[BEAD_ID]/plan.md"
-   ```
-
-6. **Return summary**:
-   - Plan status (complete/failed)
-   - Number of phases
-   - Estimated complexity (small/medium/large)
-   - Any issues encountered
-```
-
-### Launching Subagents
-
-Use `subagent_type: "opus"` for planning subagents (matches beads_plan model setting).
-
-Launch ALL planning subagents in a single message:
-```
-<Task calls for each bead to plan - all in one message>
-```
-
-### Handling Beads Without Research
-
-For beads that had existing research but user didn't review questions:
-- Planning subagent reads existing research
-- If research has unresolved open questions, subagent should flag this in its return
-
-## Phase 5: Summary
-
-After all planning completes, present final summary.
-
-### Summary Format
-
-```
-## Batch Research+Plan Complete
-
-### Successfully Processed:
-| Bead | Title | Research | Plan | Phases | Complexity |
-|------|-------|----------|------|--------|------------|
-| {id} | {title} | Complete | Complete | 3 | medium |
-| {id} | {title} | Complete | Complete | 2 | small |
-
-### New Beads (from splits):
-| Bead | Title | Parent | Status |
-|------|-------|--------|--------|
-| {new-id} | {title} | {parent-id} | Planned |
-
-### Failed:
-| Bead | Title | Phase Failed | Error |
-|------|-------|--------------|-------|
-| {id} | {title} | Research | Timeout |
-
-### Next Steps:
-1. Review plans at `thoughts/beads-{id}/plan.md`
-2. Run `/parallel_beads` to implement all planned beads
-3. Or run `/beads_implement {id}` for individual implementation
-
-### Artifacts Created:
-- Research: thoughts/beads-{id}/research.md (x{N} files)
-- Plans: thoughts/beads-{id}/plan.md (x{N} files)
-```
-
-## Error Handling
-
-### Research Subagent Failure
-- Log the failure with bead ID and error
-- Continue with other beads
-- Exclude failed beads from question review and planning
-- Report in final summary
-
-### Planning Subagent Failure
-- Log the failure with bead ID and error
-- Research still valid - can retry planning manually
-- Report in final summary
-
-### User Cancellation During Question Review
-- Save progress to bead notes
-- Report which beads were completed
-- User can resume with remaining beads in new session
-
-### Split Bead Creation Failure
-- Report error but continue with original bead
-- User can manually create split beads later
-
-## Resource Limits
-
-- Maximum concurrent research subagents: 5
-- Maximum concurrent planning subagents: 5
-- If more beads selected, process in batches
-
-## Notes
-
-- This skill is designed for the "research+plan before implementation" workflow
-- Pairs well with `/parallel_beads` for subsequent implementation
-- Run `/reconcile_beads` after implementation PRs merge
@@ -54,8 +54,6 @@ When this command is invoked:
    - Read `thoughts/beads-{bead-id}/plan.md` FULLY
    - Check for any existing checkmarks (- [x]) indicating partial progress
    - Read any research at `thoughts/beads-{bead-id}/research.md`
-   - If plan's Success Criteria references contribution guidelines (e.g., "Per CONTRIBUTING.md:"),
-     verify the original CONTRIBUTING.md still exists and requirements are current
 
 5. **Mark bead in progress** (if not already):
    ```bash
@@ -129,10 +127,6 @@ All phases completed and automated verification passed:
 - {List manual verification items from plan}
 
 Let me know when manual testing is complete so I can close the bead.
-
-**Contribution guidelines compliance:**
-- {List any contribution guideline requirements that were part of Success Criteria}
-- {Note if any requirements could not be automated and need manual review}
 ```
 
 **STOP HERE and wait for user confirmation.**
@@ -51,32 +51,13 @@ When this command is invoked:
    - Any linked tickets or docs
    - Use Read tool WITHOUT limit/offset
 
-2. **Check for contribution guidelines**:
-
-   ```bash
-   # Check standard locations for contribution guidelines
-   for f in CONTRIBUTING.md .github/CONTRIBUTING.md docs/CONTRIBUTING.md; do
-     if [ -f "$f" ]; then
-       echo "Found: $f"
-       break
-     fi
-   done
-   ```
-
-   If found:
-   - Read the file fully
-   - Extract actionable requirements (testing, code style, documentation, PR conventions)
-   - These requirements MUST be incorporated into the plan's Success Criteria
-
-   If not found, note "No contribution guidelines found" and proceed.
-
-3. **Spawn initial research tasks**:
+2. **Spawn initial research tasks**:
   - **codebase-locator**: Find all files related to the task
   - **codebase-analyzer**: Understand current implementation
   - **codebase-pattern-finder**: Find similar features to model after
   - **thoughts-locator**: Find any existing plans or decisions
 
-4. **Read all files identified by research**:
+3. **Read all files identified by research**:
   - Read them FULLY into main context
   - Cross-reference with requirements
 
@@ -292,12 +273,6 @@ Always separate into two categories:
 - Performance under real conditions
 - Edge cases hard to automate
-
-**From Contribution Guidelines** (if CONTRIBUTING.md exists):
-- Include any testing requirements specified in guidelines
-- Include any code style/linting requirements
-- Include any documentation requirements
-- Reference the guideline: "Per CONTRIBUTING.md: {requirement}"
 
 ## Example Invocation
 
 ```
@@ -51,18 +51,6 @@ When this command is invoked:
 - Use the Read tool WITHOUT limit/offset parameters
 - Read these files yourself in the main context before spawning sub-tasks
-
-### Step 1.5: Check for contribution guidelines
-
-Before spawning sub-agents, check if the repository has contribution guidelines:
-
-```bash
-for f in CONTRIBUTING.md .github/CONTRIBUTING.md docs/CONTRIBUTING.md; do
-  if [ -f "$f" ]; then echo "Found: $f"; break; fi
-done
-```
-
-If found, read the file and note key requirements. These should be included in the research document under a "## Contribution Guidelines" section if relevant to the research question.
 
 ### Step 2: Analyze and decompose the research question
 - Break down the query into composable research areas
 - Identify specific components, patterns, or concepts to investigate
@@ -155,10 +143,6 @@ status: complete
 ## Architecture Documentation
 {Current patterns, conventions found in codebase}
-
-## Contribution Guidelines
-{If CONTRIBUTING.md exists, summarize key requirements relevant to the research topic}
-{If no guidelines found, omit this section}
 
 ## Historical Context (from thoughts/)
 {Relevant insights from thoughts/ with references}
 
@@ -42,46 +42,7 @@ AskUserQuestion with:
 - options from filtered bd ready output
 ```
 
-## Phase 2: Worktree Setup
+## Phase 2: Parallel Implementation
 
-Before launching implementation subagents, create worktrees for all selected beads:
-
-1. **Get repository name**:
-   ```bash
-   REPO_NAME=$(git remote get-url origin | sed 's|.*/||' | sed 's/\.git$//')
-   ```
-
-2. **For each selected bead**, create its worktree:
-   ```bash
-   BEAD_ID="[bead-id]"
-   # Check if worktree already exists
-   if [ -d "$HOME/wt/${REPO_NAME}/${BEAD_ID}" ]; then
-     echo "Worktree already exists: ~/wt/${REPO_NAME}/${BEAD_ID}"
-     # Ask user: remove and recreate, or skip this bead?
-   else
-     git worktree add -b "bead/${BEAD_ID}" "$HOME/wt/${REPO_NAME}/${BEAD_ID}"
-   fi
-   ```
-
-3. **Track created worktrees**:
-   Maintain a list of (bead_id, worktree_path) pairs for use in subagent instructions.
-
-4. **Report status**:
-   ```
-   Created worktrees:
-   - nixos-configs-abc → ~/wt/nixos-configs/nixos-configs-abc (branch: bead/nixos-configs-abc)
-   - nixos-configs-xyz → ~/wt/nixos-configs/nixos-configs-xyz (branch: bead/nixos-configs-xyz)
-
-   Skipped (existing worktree):
-   - nixos-configs-123 → Ask user for resolution
-   ```
-
-**Note**: If a worktree or branch already exists, ask the user before proceeding:
-- Remove existing worktree and branch, then recreate
-- Skip this bead
-- Use existing worktree as-is (risky - branch may have diverged)
-
-## Phase 3: Parallel Implementation
-
 For each selected bead, launch a subagent using the Task tool. All subagents should be launched in parallel (single message with multiple Task tool calls).
 
@@ -92,92 +53,50 @@ Each implementation subagent should receive these instructions:
 ```
 Work on bead [BEAD_ID]: [BEAD_TITLE]
 
-Worktree path: [WORKTREE_PATH]
+1. **Create worktree**:
+   - Branch name: `bead/[BEAD_ID]`
+   - Worktree path: `~/wt/[REPO_NAME]/[BEAD_ID]`
+   - Command: `git worktree add -b bead/[BEAD_ID] ~/wt/[REPO_NAME]/[BEAD_ID]`
 
-## CRITICAL: Branch Verification (MUST DO FIRST)
-
-1. **Navigate to worktree**:
-   ```bash
-   cd [WORKTREE_PATH]
-   ```
-
-2. **Verify branch** (MANDATORY before ANY modifications):
-   ```bash
-   CURRENT_BRANCH=$(git branch --show-current)
-   echo "Current branch: $CURRENT_BRANCH"
-   pwd
-   ```
-
-   **ABORT CONDITIONS** - If ANY of these are true, STOP IMMEDIATELY:
-   - Branch is `main` or `master`
-   - Branch does not match `bead/[BEAD_ID]`
-
-   If you detect any abort condition:
-   ```
-   ABORTING: Branch verification failed.
-   Expected branch: bead/[BEAD_ID]
-   Actual branch: [CURRENT_BRANCH]
-   Working directory: [pwd output]
-
-   DO NOT PROCEED. Report this error to the orchestrator.
-   ```
-
-## After Verification Passes
-
-3. **Review the bead requirements**:
+2. **Review the bead requirements**:
   - Run `bd show [BEAD_ID]` to understand the acceptance criteria
   - Note any external issue references (GitHub issues, Linear tickets, etc.)
 
-4. **Extract validation criteria**:
+3. **Extract validation criteria**:
   - Check for a plan: `thoughts/beads-[BEAD_ID]/plan.md`
   - If plan exists:
     - Read the plan and find the "Automated Verification" section
     - Extract each verification command (lines starting with `- [ ]` followed by a command)
     - Example: `- [ ] Tests pass: \`make test\`` → extract `make test`
-    - Note any "Per CONTRIBUTING.md:" requirements for additional validation
-    - Also read the "Manual Verification" section from the plan if present
-    - Save manual verification items for inclusion in the PR description (they won't be executed)
   - If no plan exists, use best-effort validation:
     - Check if `Makefile` exists → try `make test` and `make lint`
     - Check if `flake.nix` exists → try `nix flake check`
     - Check if `package.json` exists → try `npm test`
-    - **Check for CONTRIBUTING.md** → read and extract testing/linting requirements
-      - Track which requirements can be automated vs need manual review
-      - Automated: commands that can be run (e.g., "run `make test`")
-      - Manual: qualitative checks (e.g., "ensure documentation is updated")
   - If none found, note "No validation criteria found"
 
-5. **Implement the changes**:
+4. **Implement the changes**:
   - Work in the worktree directory
   - Complete all acceptance criteria listed in the bead
 
   After implementation, run validation:
-  - Execute each validation command from step 4
+  - Execute each validation command from step 3
   - Track results in this format:
     ```
     VALIDATION_RESULTS:
     - make test: PASS
     - make lint: FAIL (exit code 1: src/foo.ts:23 - missing semicolon)
-    - nix flake check: SKIP (not applicable - no flake.nix)
-    - cargo test: ERROR (command not found)
+    - nix flake check: SKIP (command not found)
     ```
 
-   **Status definitions:**
-   - **PASS**: Check executed successfully with no issues
-   - **FAIL**: Check executed but found issues that need attention
-   - **SKIP**: Check not applicable to this project (e.g., no Makefile for `make test`)
-   - **ERROR**: Check could not execute (missing tool, permission error, command not found)
-
   - If any validation fails:
     - Continue with PR creation (don't block)
     - Document failures in bead notes: `bd update [BEAD_ID] --notes="Validation failures: [list]"`
 
-6. **Commit and push**:
+5. **Commit and push**:
   - Stage all changes: `git add -A`
   - Create a descriptive commit message
   - Push the branch: `git push -u origin bead/[BEAD_ID]`
 
-7. **Create a PR**:
+6. **Create a PR**:
   - Detect hosting provider from origin URL: `git remote get-url origin`
   - If URL contains `github.com`, use `gh`; otherwise use `tea` (Gitea/Forgejo)
   - PR title: "[BEAD_ID] [BEAD_TITLE]"
@@ -200,27 +119,14 @@ Worktree path: [WORKTREE_PATH]
 ## Changes
 - [List of changes made]
 
-## Validation Steps Completed
+## Validation
+[Include validation results from step 4]
 
-### Automated Checks
 | Check | Status | Details |
 |-------|--------|---------|
 | make test | PASS | |
 | make lint | FAIL | src/foo.ts:23 - missing semicolon |
-| nix flake check | SKIP | not applicable - no flake.nix |
-| cargo test | ERROR | command not found |
-
-### Manual Verification Required
-[If plan has Manual Verification items, list them as unchecked boxes:]
-- [ ] Verify UI changes match design mockups
-- [ ] Test on mobile viewport sizes
-[If no manual verification items: "None specified in plan."]
-
-### CONTRIBUTING.md Compliance
-[If CONTRIBUTING.md requirements were extracted:]
-- [x] Tests pass (verified via `make test`)
-- [ ] Documentation updated (needs manual review)
-[If no CONTRIBUTING.md: "No contribution guidelines found."]
+| nix flake check | SKIP | command not found |
 EOF
 )"
 ```
@@ -240,66 +146,44 @@ Worktree path: [WORKTREE_PATH]
 ## Changes
 - [List of changes made]
 
-## Validation Steps Completed
+## Validation
+[Include validation results from step 4]
 
-### Automated Checks
 | Check | Status | Details |
 |-------|--------|---------|
 | make test | PASS | |
 | make lint | FAIL | src/foo.ts:23 - missing semicolon |
-| nix flake check | SKIP | not applicable - no flake.nix |
-| cargo test | ERROR | command not found |
-
-### Manual Verification Required
-[If plan has Manual Verification items, list them as unchecked boxes:]
-- [ ] Verify UI changes match design mockups
-- [ ] Test on mobile viewport sizes
-[If no manual verification items: None specified in plan.]
-
-### CONTRIBUTING.md Compliance
-[If CONTRIBUTING.md requirements were extracted:]
-- [x] Tests pass (verified via make test)
-- [ ] Documentation updated (needs manual review)
-[If no CONTRIBUTING.md: No contribution guidelines found.]"
+| nix flake check | SKIP | command not found |"
 ```
 
-8. **Update bead status**:
+7. **Update bead status**:
   - Mark the bead as "in_review": `bd update [BEAD_ID] --status=in_review`
   - Add the PR URL to the bead notes: `bd update [BEAD_ID] --notes="$(bd show [BEAD_ID] --json | jq -r '.notes')
 
 PR: [PR_URL]"`
 
-9. **Report results**:
+8. **Report results**:
   - Return:
     - PR URL
     - Bead ID
    - Implementation status (success/failure/blocked)
-    - Validation summary: `X passed, Y failed, Z skipped, W errors`
-    - List of any validation failures or errors with details
+    - Validation summary: `X passed, Y failed, Z skipped`
+    - List of any validation failures with details
   - If blocked or unable to complete, explain what's blocking progress
   - If validation failed, include the specific failures so the main agent can summarize them for the user
 ```
 
 ### Launching Subagents
 
-For each bead, substitute into the template:
-- `[BEAD_ID]` - the bead ID
-- `[BEAD_TITLE]` - the bead title
-- `[WORKTREE_PATH]` - the worktree path created in Phase 2
-
 Use `subagent_type: "general-purpose"` for implementation subagents. Launch all selected beads' subagents in a single message for parallel execution:
 
 ```
 <Task calls for each selected bead - all in one message>
 ```
 
-**Important**: The worktree paths were created in Phase 2. Use the exact paths that were created, e.g.:
-- `~/wt/nixos-configs/nixos-configs-abc`
-- `~/wt/nixos-configs/nixos-configs-xyz`
-
 Collect results from all subagents before proceeding.
 
-## Phase 4: Parallel Review
+## Phase 3: Parallel Review
 
 After all implementation subagents complete, launch review subagents for each PR.
 
@@ -334,7 +218,7 @@ Review PR for bead [BEAD_ID]
 
 Launch all review subagents in parallel.
 
-## Phase 5: Cleanup and Summary
+## Phase 4: Cleanup and Summary
 
 After reviews complete:
 
@@ -380,21 +264,9 @@ Example output:
 
 ## Error Handling
 
-- **Worktree creation failures** (Phase 2):
-  - If `git worktree add` fails (branch exists, path exists), prompt user:
-    - Remove existing and retry
-    - Skip this bead
-    - Use existing (with warning about potential divergence)
-  - Do NOT proceed to subagent launch until worktree is confirmed
-
-- **Branch verification failures** (subagent reports):
-  - If subagent reports it's on `main` or `master`, do NOT retry
-  - Mark bead as failed with reason "Branch verification failed"
-  - Continue with other beads but flag this as a critical issue
-  - Investigation required: the worktree may have been corrupted or not created properly
-
 - **Subagent failures**: If a subagent fails or times out, note it in the summary but continue with other beads
 - **PR creation failures**: Report the error but continue with reviews of successful PRs
+- **Worktree conflicts**: If a worktree already exists, ask the user if they want to remove it or skip that bead
 
 ## Resource Limits
 
@@ -4,13 +4,12 @@ description: Reconcile beads with merged PRs and close completed beads
 
 # Reconcile Beads Workflow
 
-This skill reconciles beads that are in `in_review` status with their corresponding PRs. If a PR has been merged, the bead is closed and any linked Gitea issue is also closed.
+This skill reconciles beads that are in `in_review` status with their corresponding PRs. If a PR has been merged, the bead is closed.
 
 ## Prerequisites
 
 - Custom status `in_review` must be configured: `bd config set status.custom "in_review"`
 - Beads in `in_review` status should have a PR URL in their notes
-- `tea` CLI must be configured for closing Gitea issues
 
 ## Workflow
 
@@ -53,34 +52,6 @@ If the PR is merged:
 bd close [BEAD_ID] --reason="PR merged: [PR_URL]"
 ```
-
-### Step 3.1: Close corresponding Gitea issue (if any)
-
-After closing a bead, check if it has a linked Gitea issue:
-
-1. **Check for Gitea issue URL in bead notes**:
-   Look for the pattern `Gitea issue: <URL>` in the notes. Extract the URL.
-
-2. **Extract issue number from URL**:
-   ```bash
-   # Example: https://git.johnogle.info/johno/nixos-configs/issues/16 -> 16
-   echo "$GITEA_URL" | grep -oP '/issues/\K\d+'
-   ```
-
-3. **Close the Gitea issue**:
-   ```bash
-   tea issues close [ISSUE_NUMBER]
-   ```
-
-4. **Handle errors gracefully**:
-   - If issue is already closed: Log warning, continue
-   - If issue not found: Log warning, continue
-   - If `tea` fails: Log error, continue with other beads
-
-   Example warning output:
-   ```
-   Warning: Could not close Gitea issue #16: issue already closed
-   ```
 
 ### Step 4: Report summary
 
 Present results:
@@ -89,17 +60,10 @@ Present results:
 ## Beads Reconciliation Summary
 
 ### Closed (PR Merged)
-| Bead | PR | Gitea Issue | Title |
-|------|-----|-------------|-------|
-| beads-abc | #123 | #16 closed | Feature X |
-| beads-xyz | #456 | (none) | Bug fix Y |
+| Bead | PR | Title |
+|------|-----|-------|
+| beads-abc | #123 | Feature X |
+| beads-xyz | #456 | Bug fix Y |
 
-### Gitea Issues Closed
-| Issue | Bead | Status |
-|-------|------|--------|
-| #16 | beads-abc | Closed successfully |
-| #17 | beads-def | Already closed (skipped) |
-| #99 | beads-ghi | Error: issue not found |
-
 ### Still in Review
 | Bead | PR | Status | Title |
@@ -116,14 +80,9 @@ Present results:
 - **Missing PR URL**: Skip the bead and report it
 - **PR not found**: Report the error but continue with other beads
 - **API errors**: Report and continue
-- **Gitea issue already closed**: Log warning, continue (not an error)
-- **Gitea issue not found**: Log warning, continue (issue may have been deleted)
-- **No Gitea issue linked**: Normal case, no action needed
-- **tea command fails**: Log error with output, continue with other beads
 
 ## Notes
 
 - This skill complements `/parallel_beads` which sets beads to `in_review` status
 - Run this skill periodically or after merging PRs to keep beads in sync
 - Beads with closed (but not merged) PRs are not automatically closed - they may need rework
-- Gitea issues are only closed for beads that have a `Gitea issue: <URL>` in their notes
@@ -225,16 +225,11 @@
         mu4e-headers-time-format "%H:%M")
 
   ;; Sending mail via msmtp
-  ;; NOTE: message-sendmail-f-is-evil and --read-envelope-from are required
-  ;; to prevent msmtp from stripping the email body when processing headers.
-  ;; Without these, multipart messages (especially from org-msg) may arrive
-  ;; with empty bodies.
-  (setq sendmail-program (executable-find "msmtp")
-        send-mail-function #'message-send-mail-with-sendmail
-        message-send-mail-function #'message-send-mail-with-sendmail
-        message-sendmail-f-is-evil t
-        message-sendmail-extra-arguments '("--read-envelope-from")
-        message-sendmail-envelope-from 'header))
+  (setq message-send-mail-function 'message-send-mail-with-sendmail
+        sendmail-program (executable-find "msmtp")
+        message-sendmail-envelope-from 'header
+        mail-envelope-from 'header
+        mail-specify-envelope-from t))
 
 ;; Whenever you reconfigure a package, make sure to wrap your config in an
 ;; `after!' block, otherwise Doom's defaults may override your settings. E.g.
@@ -4,7 +4,6 @@ with lib;
 
 let
   cfg = config.home.roles.email;
-  isLinux = pkgs.stdenv.isLinux;
 in
 {
   options.home.roles.email = {
@@ -90,38 +89,34 @@ in
      account default : proton
    '';
 
-    # Linux-only: Systemd service for mail sync (Darwin uses launchd instead)
-    systemd.user.services = mkIf isLinux {
-      mbsync = {
-        Unit = {
-          Description = "Mailbox synchronization service";
-          After = [ "network-online.target" ];
-          Wants = [ "network-online.target" ];
-        };
-        Service = {
-          Type = "oneshot";
-          ExecStart = "${pkgs.bash}/bin/bash -c 'mkdir -p ~/Mail && ${pkgs.isync}/bin/mbsync -a && (${pkgs.mu}/bin/mu info >/dev/null 2>&1 || ${pkgs.mu}/bin/mu init --maildir ~/Mail --personal-address=john@ogle.fyi) && ${pkgs.mu}/bin/mu index'";
-          Environment = "PATH=${pkgs.rbw}/bin:${pkgs.coreutils}/bin";
-          StandardOutput = "journal";
-          StandardError = "journal";
-        };
+    # Systemd service for mail sync
+    systemd.user.services.mbsync = {
+      Unit = {
+        Description = "Mailbox synchronization service";
+        After = [ "network-online.target" ];
+        Wants = [ "network-online.target" ];
+      };
+      Service = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.bash}/bin/bash -c 'mkdir -p ~/Mail && ${pkgs.isync}/bin/mbsync -a && (${pkgs.mu}/bin/mu info >/dev/null 2>&1 || ${pkgs.mu}/bin/mu init --maildir ~/Mail --personal-address=john@ogle.fyi) && ${pkgs.mu}/bin/mu index'";
+        Environment = "PATH=${pkgs.rbw}/bin:${pkgs.coreutils}/bin";
+        StandardOutput = "journal";
+        StandardError = "journal";
      };
    };
 
-    # Linux-only: Systemd timer for automatic sync
-    systemd.user.timers = mkIf isLinux {
-      mbsync = {
-        Unit = {
-          Description = "Mailbox synchronization timer";
-        };
-        Timer = {
-          OnBootSec = "2min";
-          OnUnitActiveSec = "5min";
-          Unit = "mbsync.service";
-        };
-        Install = {
-          WantedBy = [ "timers.target" ];
-        };
+    # Systemd timer for automatic sync
+    systemd.user.timers.mbsync = {
+      Unit = {
+        Description = "Mailbox synchronization timer";
+      };
+      Timer = {
+        OnBootSec = "2min";
+        OnUnitActiveSec = "5min";
+        Unit = "mbsync.service";
+      };
+      Install = {
+        WantedBy = [ "timers.target" ];
      };
    };
  };
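Aside: the rewrite above replaces `systemd.user.services = mkIf isLinux { mbsync = { ... }; }` with the attribute-path form `systemd.user.services.mbsync = { ... }`. Setting aside the dropped `mkIf isLinux` guard, the two spellings build the same nested attribute set. A minimal, evaluable sketch using only builtins (the `Type = "oneshot"` leaf is just an example value taken from the hunk):

```nix
# Attribute-path form vs. explicitly nested form — same resulting value.
let
  pathForm   = { systemd.user.services.mbsync.Service.Type = "oneshot"; };
  nestedForm = {
    systemd = { user = { services = { mbsync = { Service = { Type = "oneshot"; }; }; }; }; };
  };
in
  pathForm == nestedForm  # evaluates to true
```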
@@ -4,15 +4,13 @@ with lib;
 
 let
   cfg = config.home.roles.kdeconnect;
-  isLinux = pkgs.stdenv.isLinux;
 in
 {
   options.home.roles.kdeconnect = {
     enable = mkEnableOption "Enable KDE Connect for device integration";
   };
 
-  # KDE Connect services are Linux-only (requires D-Bus and systemd)
-  config = mkIf (cfg.enable && isLinux) {
+  config = mkIf cfg.enable {
     services.kdeconnect = {
       enable = true;
       indicator = true;
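Aside: the change above narrows only the gating condition, not the service definition. A minimal, evaluable sketch of the two conditions, with hard-coded booleans standing in for `cfg.enable` and `pkgs.stdenv.isLinux`:

```nix
# Old vs. new enable condition for the kdeconnect role, as plain booleans.
let
  enable = true;    # stands in for cfg.enable
  isLinux = false;  # stands in for pkgs.stdenv.isLinux
in {
  oldCondition = enable && isLinux;  # false on a non-Linux host
  newCondition = enable;             # true wherever the role is enabled
}
```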
@@ -4,7 +4,6 @@ with lib;
 
 let
   cfg = config.home.roles.sync;
-  isLinux = pkgs.stdenv.isLinux;
 in
 {
   options.home.roles.sync = {
@@ -12,10 +11,9 @@ in
   };
 
   config = mkIf cfg.enable {
-    # Linux-only: syncthingtray requires system tray support
-    home.packages = optionals isLinux (with pkgs; [
+    home.packages = with pkgs; [
       syncthingtray
-    ]);
+    ];
 
     services.syncthing = {
       enable = true;
@@ -3,5 +3,4 @@
   tea-rbw = pkgs.callPackage ./tea-rbw {};
   app-launcher-server = pkgs.callPackage ./app-launcher-server {};
   claude-code = pkgs.callPackage ./claude-code {};
-  mcrcon-rbw = pkgs.callPackage ./mcrcon-rbw {};
 }
@@ -1,40 +0,0 @@
-{ pkgs, ... }:
-
-pkgs.writeShellScriptBin "mcrcon" ''
-  set -euo pipefail
-
-  # Configuration - can be overridden with environment variables
-  MINECRAFT_RCON_HOST="''${MCRCON_HOST:-10.0.0.165}"
-  MINECRAFT_RCON_PORT="''${MCRCON_PORT:-25575}"
-  RBW_ENTRY="minecraft-rcon"
-
-  # Check if rbw is available
-  if ! command -v rbw &> /dev/null; then
-    echo "Error: rbw is not available. Please ensure rbw is installed and configured."
-    exit 1
-  fi
-
-  # Retrieve password from Bitwarden
-  if ! MCRCON_PASS=$(rbw get "$RBW_ENTRY" 2>/dev/null); then
-    echo "Error: Failed to retrieve RCON password from rbw entry '$RBW_ENTRY'"
-    echo "Please ensure the entry exists in Bitwarden and rbw is synced."
-    echo ""
-    echo "To create the entry:"
-    echo "  1. Add 'minecraft-rcon' to Bitwarden with the RCON password"
-    echo "  2. Run 'rbw sync' to refresh the local cache"
-    exit 1
-  fi
-
-  # Export for mcrcon
-  export MCRCON_HOST="$MINECRAFT_RCON_HOST"
-  export MCRCON_PORT="$MINECRAFT_RCON_PORT"
-  export MCRCON_PASS
-
-  # If no arguments provided, start interactive terminal mode
-  if [[ $# -eq 0 ]]; then
-    exec ${pkgs.mcrcon}/bin/mcrcon -t
-  fi
-
-  # Execute mcrcon with all provided arguments
-  exec ${pkgs.mcrcon}/bin/mcrcon "$@"
-''
@@ -21,8 +21,6 @@ in

   services.pipewire = {
     enable = true;
-    alsa.enable = true;
-    alsa.support32Bit = true;
     pulse.enable = true;
   };

@@ -8,21 +8,6 @@ in
 {
   options.roles.nfs-mounts = {
     enable = mkEnableOption "Enable default NFS mounts";
-    server = mkOption {
-      type = types.str;
-      default = "10.0.0.43";
-      description = "IP address or hostname of the NFS server";
-    };
-    remotePath = mkOption {
-      type = types.str;
-      default = "/media";
-      description = "Remote path to mount from the NFS server";
-    };
-    mountPoint = mkOption {
-      type = types.str;
-      default = "/media";
-      description = "Local mount point for the NFS share";
-    };
     # TODO: implement requireMount
     requireMount = mkOption {
       type = types.bool;
@@ -33,8 +18,8 @@ in

   config = mkIf cfg.enable
     {
-      fileSystems.${cfg.mountPoint} = {
-        device = "${cfg.server}:${cfg.remotePath}";
+      fileSystems."/media" = {
+        device = "10.0.0.43:/media";
         fsType = "nfs";
         options = [
           "defaults"
@@ -8,21 +8,6 @@ in
 {
   options.roles.printing = {
     enable = mkEnableOption "Enable default printing setup";
-    printerName = mkOption {
-      type = types.str;
-      default = "MFC-L8900CDW_series";
-      description = "Name for the default printer";
-    };
-    printerUri = mkOption {
-      type = types.str;
-      default = "ipp://brother.oglehome/ipp/print";
-      description = "Device URI for the default printer (e.g., ipp://hostname/ipp/print)";
-    };
-    printerModel = mkOption {
-      type = types.str;
-      default = "everywhere";
-      description = "PPD model for the printer (use 'everywhere' for driverless IPP)";
-    };
   };

   config = mkIf cfg.enable
@@ -36,11 +21,11 @@ in
     };

     hardware.printers.ensurePrinters = [{
-      name = cfg.printerName;
-      deviceUri = cfg.printerUri;
-      model = cfg.printerModel;
+      name = "MFC-L8900CDW_series";
+      deviceUri = "ipp://brother.oglehome/ipp/print";
+      model = "everywhere";
     }];
-    hardware.printers.ensureDefaultPrinter = cfg.printerName;
+    hardware.printers.ensureDefaultPrinter = "MFC-L8900CDW_series";

     # Fix ensure-printers service to wait for network availability
     systemd.services.ensure-printers = {
@@ -8,11 +8,6 @@ in
 {
   options.roles.virtualisation = {
     enable = mkEnableOption "Enable virtualisation";
-    dockerUsers = mkOption {
-      type = types.listOf types.str;
-      default = [ "johno" ];
-      description = "List of users to add to the docker group";
-    };
   };

   config = mkIf cfg.enable
@@ -20,6 +15,6 @@ in
     virtualisation.libvirtd.enable = true;
     programs.virt-manager.enable = true;
     virtualisation.docker.enable = true;
-    users.extraGroups.docker.members = cfg.dockerUsers;
+    users.extraGroups.docker.members = [ "johno" ];
   };
 }
@@ -1,30 +1,6 @@
 #!/usr/bin/env bash
 set -euo pipefail

-# Parse arguments
-while [[ $# -gt 0 ]]; do
-  case $1 in
-    --help|-h)
-      echo "Usage: $0 [OPTIONS]"
-      echo ""
-      echo "Rotate to the next wallpaper in the configured list."
-      echo ""
-      echo "This script increments the currentIndex in home/wallpapers/default.nix,"
-      echo "cycling through available wallpapers. Rebuild your system to apply"
-      echo "the new wallpaper."
-      echo ""
-      echo "Options:"
-      echo " --help, -h Show this help message"
-      exit 0
-      ;;
-    *)
-      echo "Unknown option: $1"
-      echo "Use --help for usage information"
-      exit 1
-      ;;
-  esac
-done
-
 # Colors for output
 RED='\033[0;31m'
 GREEN='\033[0;32m'
@@ -1,30 +1,6 @@
 #!/usr/bin/env bash
 set -euo pipefail

-# Parse arguments
-while [[ $# -gt 0 ]]; do
-  case $1 in
-    --help|-h)
-      echo "Usage: $0 [OPTIONS]"
-      echo ""
-      echo "Update Doom Emacs to the latest commit from the doomemacs repository."
-      echo ""
-      echo "This script fetches the latest commit SHA from the default branch,"
-      echo "updates the rev and sha256 in home/roles/emacs/default.nix, and"
-      echo "prepares the configuration for a system rebuild."
-      echo ""
-      echo "Options:"
-      echo " --help, -h Show this help message"
-      exit 0
-      ;;
-    *)
-      echo "Unknown option: $1"
-      echo "Use --help for usage information"
-      exit 1
-      ;;
-  esac
-done
-
 # Colors for output
 RED='\033[0;31m'
 GREEN='\033[0;32m'
@@ -1,35 +1,6 @@
 #!/usr/bin/env bash
 set -euo pipefail

-# Parse arguments
-while [[ $# -gt 0 ]]; do
-  case $1 in
-    --help|-h)
-      echo "Usage: $0 [OPTIONS]"
-      echo ""
-      echo "Perform a major upgrade of the NixOS configuration."
-      echo ""
-      echo "This script runs the following steps:"
-      echo " 1. Update all flake inputs (nix flake update)"
-      echo " 2. Update Doom Emacs to the latest commit"
-      echo " 3. Update Claude Code to the latest version"
-      echo " 4. Rotate to the next wallpaper"
-      echo ""
-      echo "After completion, review changes with 'git diff' and rebuild"
-      echo "your system with 'sudo nixos-rebuild switch --flake .'"
-      echo ""
-      echo "Options:"
-      echo " --help, -h Show this help message"
-      exit 0
-      ;;
-    *)
-      echo "Unknown option: $1"
-      echo "Use --help for usage information"
-      exit 1
-      ;;
-  esac
-done
-
 # Colors for output
 RED='\033[0;31m'
 GREEN='\033[0;32m'