commit 519a579
parent 519a579
author Eric Bower
date 2025-10-10 16:10:18 -0400 EDT

feat: init

18 files changed, +2201, -0

+10, -0
1@@ -0,0 +1,10 @@
2+.DS_Store
3+*.log
4+.zig-cache/
5+zig-cache/
6+zig-out/
7+Session*.*vim
8+commit_msg
9+*.sw?
10+libxev_src/
11+zig_std_src/
+62, -0
1@@ -0,0 +1,62 @@
2+# zmx
3+
4+The goal of this project is to create a way to attach to and detach from terminal sessions without killing the underlying Linux process.
5+
6+## tech stack
7+
8+- `zig` v0.15.1
9+- `libghostty-vt` for terminal escape codes and terminal state management
10+- `libxev` for handling single-threaded, non-blocking, async flow control
11+- `clap` for building the cli
12+- `systemd` for background process supervision
13+
14+## commands
15+
16+- **Build:** `zig build`
17+- **Build Check (Zig):** `zig build check`
18+- **Test (Zig):** `zig build test`
19+- **Test filter (Zig):** `zig build test -Dtest-filter=<test name>`
20+- **Formatting (Zig):** `zig fmt .`
21+
22+## features
23+
24+- Persist terminal shell sessions (pty processes)
25+- Ability to attach and detach from a shell session without killing it
26+- Native terminal scrollback
27+- Manage shell sessions
28+- Multiple clients can connect to the same session
29+- Background process (`daemon`) manages all pty processes
30+- A cli tool to interact with `daemon` and all pty processes
31+- Re-attaching to a session restores previous terminal state and output
32+- The `daemon` and client processes communicate via a unix socket
33+- The `daemon` is managed by a supervisor like `systemd`
34+- We provide a `systemd` unit file that users can install that manages the `daemon` process
35+- The cli tool supports the following commands:
36+ - `attach {session}`: attach to the pty process
37+ - `detach {session}`: detach from the pty process without killing it
38+ - `kill {session}`: kill the pty process
39+ - `list`: show all sessions and what clients are currently attached
40+ - `daemon`: the background process that manages all sessions
41+- This project does **NOT** provide windows, tabs, or window splits
42+- It supports all the terminal features that the client's terminal emulator supports
43+- The current version only works on Linux
44+
45+## finding libxev source code
46+
47+To inspect the source code for libxev, look inside the `libxev_src` folder.
48+
49+## finding zig std library source code
50+
51+To inspect the source code for zig's standard library, look inside the `zig_std_src` folder.
52+
53+### prior art - shpool
54+
55+The project that most closely resembles this one is `shpool`.
56+
57+You can find the source code at this repo: https://github.com/shell-pool/shpool
58+
59+`shpool` is a service that enables session persistence by allowing the creation of named shell sessions owned by `shpool` so that the session is not lost if the connection drops.
60+
61+`shpool` can be thought of as a lighter weight alternative to tmux or GNU screen. While tmux and screen take over the whole terminal and provide window splitting and tiling features, `shpool` only provides persistent sessions.
62+
63+The biggest advantage of this approach is that `shpool` does not break native scrollback or copy-paste.
+72, -0
1@@ -0,0 +1,72 @@
2+const std = @import("std");
3+
4+pub fn build(b: *std.Build) void {
5+ const target = b.standardTargetOptions(.{});
6+ const optimize = b.standardOptimizeOption(.{});
7+
8+ const run_step = b.step("run", "Run the app");
9+ const test_step = b.step("test", "Run unit tests");
10+
11+ const exe_mod = b.createModule(.{
12+ .root_source_file = b.path("src/main.zig"),
13+ .target = target,
14+ .optimize = optimize,
15+ });
16+
17+ // You'll want to use a lazy dependency here so that ghostty is only
18+ // downloaded if you actually need it.
19+ if (b.lazyDependency("ghostty", .{})) |dep| {
20+ exe_mod.addImport(
21+ "ghostty-vt",
22+ dep.module("ghostty-vt"),
23+ );
24+ }
25+
26+ if (b.lazyDependency("libxev", .{
27+ .target = target,
28+ .optimize = optimize,
29+ })) |dep| {
30+ exe_mod.addImport("xev", dep.module("xev"));
31+ }
32+
33+ const clap_dep = b.dependency("clap", .{
34+ .target = target,
35+ .optimize = optimize,
36+ });
37+ exe_mod.addImport("clap", clap_dep.module("clap"));
38+
39+ // Exe
40+ const exe = b.addExecutable(.{
41+ .name = "zmx",
42+ .root_module = exe_mod,
43+ });
44+ b.installArtifact(exe);
45+
46+ // Run
47+ const run_cmd = b.addRunArtifact(exe);
48+ run_cmd.step.dependOn(b.getInstallStep());
49+ if (b.args) |args| run_cmd.addArgs(args);
50+ run_step.dependOn(&run_cmd.step);
51+
52+ // Test
53+ const exe_unit_tests = b.addTest(.{
54+ .root_module = exe_mod,
55+ });
56+ const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests);
57+ test_step.dependOn(&run_exe_unit_tests.step);
58+
59+ // This is where the interesting part begins.
60+ // As you can see we are re-defining the same executable but
61+ // we're binding it to a dedicated build step.
62+ const exe_check = b.addExecutable(.{
63+ .name = "foo",
64+ .root_module = exe_mod,
65+ });
66+ // There is no `b.installArtifact(exe_check);` here.
67+
68+ // Finally we add the "check" step which will be detected
69+ // by ZLS and automatically enable Build-On-Save.
70+ // If you copy this into your `build.zig`, make sure to rename 'foo'
71+ const check = b.step("check", "Check if foo compiles");
72+ check.dependOn(&exe_check.step);
73+}
+55, -0
1@@ -0,0 +1,55 @@
2+.{
3+ // This is the default name used by packages depending on this one. For
4+ // example, when a user runs `zig fetch --save <url>`, this field is used
5+ // as the key in the `dependencies` table. Although the user can choose a
6+ // different name, most users will stick with this provided value.
7+ //
8+ // It is redundant to include "zig" in this name because it is already
9+ // within the Zig package namespace.
10+ .name = .zmx,
11+ // This is a [Semantic Version](https://semver.org/).
12+ // In a future version of Zig it will be used for package deduplication.
13+ .version = "0.0.0",
14+ // Together with name, this represents a globally unique package
15+ // identifier. This field is generated by the Zig toolchain when the
16+ // package is first created, and then *never changes*. This allows
17+ // unambiguous detection of one package being an updated version of
18+ // another.
19+ //
20+ // When forking a Zig project, this id should be regenerated (delete the
21+ // field and run `zig build`) if the upstream project is still maintained.
22+ // Otherwise, the fork is *hostile*, attempting to take control over the
23+ // original project's identity. Thus it is recommended to leave the comment
24+ // on the following line intact, so that it shows up in code reviews that
25+ // modify the field.
26+ .fingerprint = 0x28aad87005052b4e, // Changing this has security and trust implications.
27+ // Tracks the earliest Zig version that the package considers to be a
28+ // supported use case.
29+ .minimum_zig_version = "0.15.1",
30+ // This field is optional.
31+ // Each dependency must either provide a `url` and `hash`, or a `path`.
32+ // `zig build --fetch` can be used to fetch all dependencies of a package, recursively.
33+ // Once all dependencies are fetched, `zig build` no longer requires
34+ // internet connectivity.
35+ .dependencies = .{
36+ .ghostty = .{
37+ .url = "git+https://github.com/ghostty-org/ghostty.git?ref=HEAD#42a38ff672fe0cbbb8588380058c91ac16ed9069",
38+ .hash = "ghostty-1.2.1-5UdBC-7EVAMgwHLhtsdeH4rgcZ7WlagXZ1QXN9bRKJ5s",
39+ },
40+ .libxev = .{
41+ .url = "git+https://github.com/mitchellh/libxev?ref=HEAD#34fa50878aec6e5fa8f532867001ab3c36fae23e",
42+ .hash = "libxev-0.0.0-86vtc4IcEwCqEYxEYoN_3KXmc6A9VLcm22aVImfvecYs",
43+ },
44+ .clap = .{
45+ .url = "git+https://github.com/Hejsil/zig-clap?ref=HEAD#b7e3348ed60f99ba32c75aa707ff7c87adc31b36",
46+ .hash = "clap-0.11.0-oBajB-TnAQC7yPLnZRT5WzHZ_4Ly4dX2OILskli74b9H",
47+ },
48+ },
49+ .paths = .{
50+ "build.zig",
51+ "build.zig.zon",
52+ "src",
53+ //"LICENSE",
54+ //"README.md",
55+ },
56+}
+34, -0
1@@ -0,0 +1,34 @@
2+# cli scaffolding implementation plan
3+
4+This document outlines the plan for implementing the CLI scaffolding for the `zmx` tool, based on the `specs/cli.md` document.
5+
6+## 1. Add `zig-clap` Dependency
7+
8+- **Modify `build.zig.zon`**: Add `zig-clap` to the dependencies.
9+- **Modify `build.zig`**: Fetch the `zig-clap` module and make it available to the executable.
10+
11+## 2. Create `src/cli.zig`
12+
13+- Create a new file `src/cli.zig` to encapsulate all CLI-related logic.
14+
15+## 3. Define Commands in `src/cli.zig`
16+
17+- Use `zig-clap` to define the command structure specified in `specs/cli.md`.
18+- This includes the global options (`-h`, `-v`) and the subcommands:
19+ - `daemon`
20+ - `list`
21+ - `attach <session>`
22+ - `detach <session>`
23+ - `kill <session>`
24+- For each command, define the expected arguments and options.
25+
26+## 4. Integrate with `src/main.zig`
27+
28+- In `src/main.zig`, import the `cli` module.
29+- Call the CLI parsing logic from the `main` function.
30+- The `main` function will dispatch to the appropriate command handler based on the parsed arguments.
31+- When the `daemon` subcommand is invoked, the application will act as a long-running "server".
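+
+As a sketch, the dispatch in `main` could follow `zig-clap`'s documented subcommand pattern (handler bodies omitted; this is illustrative, not the final implementation):
+
+```
+const std = @import("std");
+const cli = @import("cli.zig");
+
+pub fn main() !void {
+    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
+    defer _ = gpa.deinit();
+    const allocator = gpa.allocator();
+
+    var iter = try std.process.ArgIterator.initWithAllocator(allocator);
+    defer iter.deinit();
+
+    var res = try cli.parse(allocator, &iter);
+    defer res.deinit();
+
+    // `terminating_positional = 0` leaves `iter` positioned after the
+    // subcommand, so it can be reused to parse subcommand arguments.
+    const command = res.positionals[0] orelse return cli.help();
+    switch (command) {
+        .help => try cli.help(),
+        .daemon => {}, // hand off to the daemon module
+        .list, .attach, .detach, .kill => {}, // per-command handlers
+    }
+}
+```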
32+
33+## 5. Single Executable
34+
35+- The `build.zig` will define a single executable named `zmx`.
+56, -0
1@@ -0,0 +1,56 @@
2+# zmx daemon implementation plan
3+
4+This document outlines the plan for implementing the `zmx daemon` subcommand, based on the specifications in `specs/daemon.md` and `specs/protocol.md`.
5+
6+## 1. Create `src/daemon.zig`
7+
8+- Create a new file `src/daemon.zig` to house the core logic for the daemon process.
9+
10+## 2. Implement Unix Socket Communication
11+
12+- In `src/daemon.zig`, create and bind a Unix domain socket based on the `--socket-path` option.
13+- Listen for and accept incoming client connections.
14+- Implement a message-passing system using `std.json` for serialization and deserialization, adhering to the `protocol.md` specification.
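+
+As a rough sketch, extracting the `type` field from one newline-delimited message could look like this (function name illustrative):
+
+```
+const std = @import("std");
+
+// Parse a single newline-delimited protocol message and return a copy of
+// its `type` field, using the dynamic `std.json.Value` representation.
+fn messageType(allocator: std.mem.Allocator, line: []const u8) ![]u8 {
+    const parsed = try std.json.parseFromSlice(std.json.Value, allocator, line, .{});
+    defer parsed.deinit();
+    const t = parsed.value.object.get("type") orelse return error.MissingType;
+    // Copy the string out before `parsed.deinit()` frees its backing memory.
+    return allocator.dupe(u8, t.string);
+}
+```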
15+
16+## 3. Implement Session Management
17+
18+- Define a `Session` struct to manage the state of each PTY process. This struct will include:
19+ - The session name.
20+ - The file descriptor for the PTY.
21+ - A buffer for the terminal output (scrollback).
22+ - The terminal state, managed by `libghostty-vt`.
23+ - A list of connected client IDs.
24+- Use a `std.StringHashMap(Session)` to store and manage all active sessions.
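+
+A sketch of that struct (field names tentative; the initial implementation may cover only a subset):
+
+```
+const std = @import("std");
+
+const Session = struct {
+    name: []const u8,
+    pty_fd: std.posix.fd_t, // master side of the PTY
+    scrollback: std.ArrayList(u8), // buffered terminal output
+    // terminal state managed by libghostty-vt would live here as well
+    clients: std.ArrayList(std.posix.fd_t), // fds of attached clients
+};
+```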
25+
26+## 4. Implement PTY Management
27+
28+- Create a function to spawn a new PTY process using `forkpty`.
29+- This function will be called when a new, non-existent session is requested.
30+- Implement functions to read from and write to the PTY file descriptor.
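+
+A minimal sketch of spawning a shell on a new PTY via `forkpty(3)` (shell path hardcoded and error handling trimmed for illustration; glibc needs `-lutil`):
+
+```
+const std = @import("std");
+const c = @cImport({
+    @cInclude("pty.h");
+    @cInclude("unistd.h");
+});
+
+const PtyChild = struct { master: std.posix.fd_t, pid: std.posix.pid_t };
+
+fn spawnShell() !PtyChild {
+    var master: c_int = undefined;
+    // forkpty forks a child whose controlling terminal is the new PTY slave.
+    const pid = c.forkpty(&master, null, null, null);
+    if (pid < 0) return error.ForkPtyFailed;
+    if (pid == 0) {
+        // Child: replace this process with the user's shell.
+        _ = c.execlp("sh", "sh", @as([*c]const u8, null));
+        std.posix.exit(1);
+    }
+    return .{ .master = master, .pid = pid };
+}
+```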
31+
32+## 5. Implement the Main Event Loop
33+
34+- The core of the daemon will be an event loop (using `libxev` on Linux) that concurrently handles:
35+ 1. New client connections on the main Unix socket.
36+ 2. Incoming requests from connected clients.
37+ 3. Output from the PTY processes.
38+- This will allow the daemon to be single-threaded and highly concurrent.
39+
40+## 6. Implement Protocol Handlers
41+
42+- For each message type defined in `protocol.md`, create a handler function:
43+ - `handle_list_sessions_request`: Responds with a list of all active sessions.
44+ - `handle_attach_session_request`: Adds the client to the session's list of connected clients and sends them the scrollback buffer.
45+ - `handle_detach_session_request`: Removes the client from the session's list.
46+ - `handle_kill_session_request`: Terminates the PTY process and removes the session.
47+ - `handle_pty_input`: Writes the received data to the corresponding PTY.
48+- When there is output from a PTY, the daemon will create a `pty_output` message and send it to all attached clients.
49+
50+## 7. Integrate with `main.zig`
51+
52+- In `main.zig`, when the `daemon` subcommand is parsed, call the main entry point of the `daemon` module.
53+- Pass the parsed command-line options (e.g., `--socket-path`) to the daemon's initialization function.
54+
55+## 8. Do **NOT** Handle Daemonization
56+
57+This command will be run under a systemd unit, so it does not need to concern itself with daemonizing.
+106, -0
1@@ -0,0 +1,106 @@
2+# zmx cli specification
3+
4+This document outlines the command-line interface for the `zmx` tool.
5+
6+## third-party libraries
7+
8+We will use the `zig-clap` library for parsing command-line arguments. It provides a robust and flexible way to define commands, subcommands, and flags.
9+
10+## command structure
11+
12+The `zmx` tool will follow a subcommand-based structure.
13+
14+```
15+zmx [command] [options]
16+```
17+
18+### global options
19+
20+- `-h`, `--help`: Display help information.
21+- `-v`, `--version`: Display the version of the tool.
22+
23+### commands
24+
25+#### `daemon`
26+
27+This is the background process that manages all the shell sessions (pty processes) that the client interacts with.
28+
29+**Usage:**
30+
31+```
32+zmx daemon
33+```
34+
35+**Arguments:**
36+
37+- `<socket>`: The location of the unix socket file. Connecting clients must pass the same socket path.
38+
39+#### `list`
40+
41+List all active sessions.
42+
43+**Usage:**
44+
45+```
46+zmx list
47+```
48+
49+**Output:**
50+
51+The `list` command will output a table with the following columns:
52+
53+- `SESSION`: The name of the session.
54+- `STATUS`: The status of the session (e.g., `attached`, `detached`).
55+- `CLIENTS`: The number of clients currently attached to the session.
56+- `CREATED_AT`: The date when the session was created.
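+
+For example (values hypothetical):
+
+```
+SESSION   STATUS     CLIENTS   CREATED_AT
+dev       attached   1         2025-10-10T20:10:18Z
+build     detached   0         2025-10-09T14:02:51Z
+```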
57+
58+---
59+
60+#### `attach`
61+
62+Attach to a session.
63+
64+**Usage:**
65+
66+```
67+zmx attach <session>
68+```
69+
70+**Arguments:**
71+
72+- `<session>`: The name of the session to attach to. This is a required argument.
73+- `<socket>`: The location of the unix socket file.
74+
75+---
76+
77+#### `detach`
78+
79+Detach from a session.
80+
81+**Usage:**
82+
83+```
84+zmx detach <session>
85+```
86+
87+**Arguments:**
88+
89+- `<session>`: The name of the session to detach from. This is a required argument.
90+- `<socket>`: The location of the unix socket file.
91+
92+---
93+
94+#### `kill`
95+
96+Kill a session.
97+
98+**Usage:**
99+
100+```
101+zmx kill <session>
102+```
103+
104+**Arguments:**
105+
106+- `<session>`: The name of the session to kill. This is a required argument.
107+- `<socket>`: The location of the unix socket file.
+46, -0
1@@ -0,0 +1,46 @@
2+# zmx daemon specification
3+
4+This document outlines the specification for the `zmx daemon` subcommand, which runs the background process responsible for managing terminal sessions.
5+
6+## purpose
7+
8+The `zmx daemon` subcommand starts the long-running background process that manages all pseudo-terminal (PTY) processes. It acts as the central hub for session persistence, allowing clients to attach to and detach from active terminal sessions without terminating the underlying processes.
9+
10+## responsibilities
11+
12+The daemon is responsible for:
13+
14+1. **PTY Management**: Creating, managing, and destroying PTY processes using `fork` or `forkpty`.
15+2. **Session State Management**: Maintaining the terminal state and a buffer of text output for each active session. This ensures that when a client re-attaches, they see the previous output and the correct terminal state.
16+3. **Client Communication**: Facilitating communication between multiple `zmx` client instances and the managed PTY processes via a Unix socket.
17+4. **Session Lifecycle**: Handling the lifecycle of sessions, including creation, listing, attachment, detachment, and termination (killing).
18+5. **Resource Management**: Managing system resources associated with each session.
19+
20+## usage
21+
22+```
23+zmx daemon [options]
24+```
25+
26+## options
27+
28+- `-s`, `--socket-path <path>`: Specifies the path to the Unix socket for client-daemon communication. Defaults to a system-dependent location (e.g., `/tmp/zmx.sock`).
29+- `-b`, `--buffer-size <size>`: Sets the maximum size (in lines or bytes) for the session's scrollback buffer. Defaults to a reasonable value (e.g., 1000 lines).
30+- `-l`, `--log-level <level>`: Sets the logging level for the daemon (e.g., `debug`, `info`, `warn`, `error`). Defaults to `info`.
31+
32+## systemd integration
33+
34+The `zmx daemon` process is designed to be managed by `systemd`. A `systemd` unit file will be provided to ensure the daemon starts automatically on boot, restarts on failure, and logs its output appropriately.
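+
+A minimal sketch of such a unit (installation path and target are illustrative, not final):
+
+```
+[Unit]
+Description=zmx session daemon
+
+[Service]
+ExecStart=/usr/local/bin/zmx daemon
+Restart=on-failure
+
+[Install]
+WantedBy=default.target
+```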
35+
36+## communication protocol
37+
38+(Defined in `specs/protocol.md`.)
39+
40+The daemon will expose an API over the Unix socket to allow clients to:
41+
42+- List active sessions.
43+- Request attachment to a session.
44+- Send input to a session.
45+- Receive output from a session.
46+- Detach from a session.
47+- Kill a session.
+106, -0
1@@ -0,0 +1,106 @@
2+# ZMX Client-Daemon Communication Protocol
3+
4+This document specifies the communication protocol between `zmx` clients and the `zmx daemon` over a Unix socket.
5+
6+## Transport
7+
8+The communication occurs over a Unix domain socket. The path to the socket is configurable via the `--socket-path` option of the `daemon` subcommand.
9+
10+## Serialization
11+
12+All messages are serialized using JSON. Each message is a JSON object, and messages are separated by a newline character (`\n`). This allows for simple streaming and parsing of messages.
13+
14+## Message Structure
15+
16+Each message is a JSON object with two top-level properties:
17+
18+- `type`: A string that identifies the type of the message (e.g., `list_sessions_request`).
19+- `payload`: A JSON object containing the message-specific data.
20+
21+### Requests
22+
23+Requests are sent from the client to the daemon.
24+
25+### Responses
26+
27+Responses are sent from the daemon to the client in response to a request. Every response will have a `status` field in its payload, which can be either `ok` or `error`. If the status is `error`, the payload will also contain an `error_message` field.
28+
29+## Message Types
30+
31+### `list_sessions`
32+
33+- **Direction**: Client -> Daemon
34+- **Request Type**: `list_sessions_request`
35+- **Request Payload**: (empty)
36+
37+- **Direction**: Daemon -> Client
38+- **Response Type**: `list_sessions_response`
39+- **Response Payload**:
40+ - `status`: `ok`
41+ - `sessions`: An array of session objects.
42+
43+**Session Object:**
44+
45+- `name`: string
46+- `status`: string (`attached` or `detached`)
47+- `clients`: number
48+- `created_at`: string (ISO 8601 format)
49+
50+### `attach_session`
51+
52+- **Direction**: Client -> Daemon
53+- **Request Type**: `attach_session_request`
54+- **Request Payload**:
55+ - `session_name`: string
56+
57+- **Direction**: Daemon -> Client
58+- **Response Type**: `attach_session_response`
59+- **Response Payload**:
60+ - `status`: `ok` or `error`
61+ - `error_message`: string (if status is `error`)
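+
+An example exchange, with a hypothetical session name (`->` is client-to-daemon, `<-` is daemon-to-client):
+
+```
+-> {"type":"attach_session_request","payload":{"session_name":"dev"}}
+<- {"type":"attach_session_response","payload":{"status":"ok"}}
+```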
62+
63+### `detach_session`
64+
65+- **Direction**: Client -> Daemon
66+- **Request Type**: `detach_session_request`
67+- **Request Payload**:
68+ - `session_name`: string
69+
70+- **Direction**: Daemon -> Client
71+- **Response Type**: `detach_session_response`
72+- **Response Payload**:
73+ - `status`: `ok` or `error`
74+ - `error_message`: string (if status is `error`)
75+
76+### `kill_session`
77+
78+- **Direction**: Client -> Daemon
79+- **Request Type**: `kill_session_request`
80+- **Request Payload**:
81+ - `session_name`: string
82+
83+- **Direction**: Daemon -> Client
84+- **Response Type**: `kill_session_response`
85+- **Response Payload**:
86+ - `status`: `ok` or `error`
87+ - `error_message`: string (if status is `error`)
88+
89+### `pty_input`
90+
91+- **Direction**: Client -> Daemon
92+- **Request Type**: `pty_input`
93+- **Request Payload**:
94+ - `session_name`: string
95+ - `data`: string (base64 encoded)
96+
97+This message does not have a direct response. It is a fire-and-forget message from the client.
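+
+For example, sending `ls -la` plus Enter to a hypothetical session `dev` (`data` is the base64 encoding of `ls -la\n`):
+
+```
+{"type":"pty_input","payload":{"session_name":"dev","data":"bHMgLWxhCg=="}}
+```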
98+
99+### `pty_output`
100+
101+- **Direction**: Daemon -> Client
102+- **Request Type**: `pty_output`
103+- **Request Payload**:
104+ - `session_name`: string
105+ - `data`: string (base64 encoded)
106+
107+This message is sent from the daemon to an attached client whenever there is output from the PTY.
+25, -0
1@@ -0,0 +1,25 @@
2+# zmx session restore specification
3+
4+This document outlines the specification for how we preserve session state, so that when a client reattaches to a session with `zmx attach {session}` the session is restored to its last state.
5+
6+## purpose
7+
8+The `zmx attach` subcommand re-attaches to a previously created session. When doing this we want to restore the session to its last state, displaying the most recent screen text, layout, text wrapping, etc. This includes a configurable scrollback buffer size; the scrollback is also restored upon reattach.
9+
10+## technical details
11+
12+- The daemon spawns the shell on a pty master.
13+- Every byte the shell emits is parsed on-the-fly by the in-process terminal emulator (libghostty-vt).
14+- The emulator updates an internal 2-D cell grid (the “snapshot”); while no client is attached, the raw bytes are not forwarded anywhere else.
15+- When a client is attached, the daemon also proxies those bytes straight to the client’s socket; the emulator runs in parallel only to keep the snapshot current.
16+- When you reattach, the daemon does not send the historic byte stream; instead it renders the current grid into a fresh ANSI sequence and ships that down the Unix-domain socket to the new `zmx attach` client.
17+- The client simply write()s that sequence to stdout; your local terminal sees it and redraws the screen instantly.
18+
19+So the emulator is not “between” client and daemon in the latency sense; it is alongside, maintaining state.
20+The only time it interposes is on re-attach: it briefly synthesizes a single frame so your local terminal can show the exact session image without having to replay minutes or hours of output.
21+
22+## using libghostty-vt
23+
24+- Feature superset: SIMD parsing, full Unicode grapheme clusters, Kitty graphics, sixel, and a very large set of CSI/DEC/OSC commands, already implemented and fuzz-tested.
25+- Memory model: it hands you a read-only snapshot of the grid that you can memcpy straight into your re-attach logic, with no allocator churn.
26+- No I/O policy: it performs no I/O of its own; you feed it bytes as they arrive from the pty and later ask for the current screen.
+475, -0
1@@ -0,0 +1,475 @@
2+const std = @import("std");
3+const posix = std.posix;
4+const xevg = @import("xev");
5+const xev = xevg.Dynamic;
6+const socket_path = "/tmp/zmx.sock";
7+
8+const c = @cImport({
9+ @cInclude("termios.h");
10+});
11+
12+const Context = struct {
13+ stream: xev.Stream,
14+ stdin_stream: xev.Stream,
15+ allocator: std.mem.Allocator,
16+ loop: *xev.Loop,
17+ session_name: []const u8,
18+ prefix_pressed: bool = false,
19+ should_exit: bool = false,
20+ stdin_completion: ?*xev.Completion = null,
21+ stdin_ctx: ?*StdinContext = null,
22+ read_completion: ?*xev.Completion = null,
23+ read_ctx: ?*ReadContext = null,
24+};
25+
26+pub fn main() !void {
27+ var gpa = std.heap.GeneralPurposeAllocator(.{}){};
28+ defer _ = gpa.deinit();
29+ const allocator = gpa.allocator();
30+
31+ // Get session name from command-line arguments
32+ const args = try std.process.argsAlloc(allocator);
33+ defer std.process.argsFree(allocator, args);
34+
35+ if (args.len < 3) {
36+ std.debug.print("Usage: zmx attach <session-name>\n", .{});
37+ std.process.exit(1);
38+ }
39+
40+ const session_name = args[2];
41+
42+ var thread_pool = xevg.ThreadPool.init(.{});
43+ defer thread_pool.deinit();
44+ defer thread_pool.shutdown();
45+
46+ var loop = try xev.Loop.init(.{ .thread_pool = &thread_pool });
47+ defer loop.deinit();
48+
49+ // Save original terminal settings and set raw mode
50+ var orig_termios: c.termios = undefined;
51+ _ = c.tcgetattr(posix.STDIN_FILENO, &orig_termios);
52+ defer _ = c.tcsetattr(posix.STDIN_FILENO, c.TCSANOW, &orig_termios);
53+
54+ var raw_termios = orig_termios;
55+ c.cfmakeraw(&raw_termios);
56+ _ = c.tcsetattr(posix.STDIN_FILENO, c.TCSANOW, &raw_termios);
57+
58+ var unix_addr = try std.net.Address.initUnix(socket_path);
59+ // AF.UNIX: Unix domain socket for local IPC with daemon process
60+ // SOCK.STREAM: Reliable, connection-oriented communication for protocol messages
61+ // SOCK.NONBLOCK: Prevents blocking to work with libxev's async event loop
62+ const socket_fd = try posix.socket(posix.AF.UNIX, posix.SOCK.STREAM | posix.SOCK.NONBLOCK, 0);
63+ try posix.connect(socket_fd, &unix_addr.any, unix_addr.getOsSockLen());
64+ const request = try std.fmt.allocPrint(
65+ allocator,
66+ "{{\"type\":\"attach_session_request\",\"payload\":{{\"session_name\":\"{s}\"}}}}\n",
67+ .{session_name},
68+ );
69+ defer allocator.free(request);
70+
71+ _ = posix.write(posix.STDERR_FILENO, "Attaching to session: ") catch {};
72+ _ = posix.write(posix.STDERR_FILENO, session_name) catch {};
73+ _ = posix.write(posix.STDERR_FILENO, "\n") catch {};
74+
75+ const ctx = try allocator.create(Context);
76+ ctx.* = .{
77+ .stream = xev.Stream.initFd(socket_fd),
78+ .stdin_stream = xev.Stream.initFd(posix.STDIN_FILENO),
79+ .allocator = allocator,
80+ .loop = &loop,
81+ .session_name = session_name,
82+ };
83+
84+ const write_completion = try allocator.create(xev.Completion);
85+ ctx.stream.write(&loop, write_completion, .{ .slice = request }, Context, ctx, writeCallback);
86+
87+ try loop.run(.until_done);
88+}
89+
90+fn writeCallback(
91+ ctx_opt: ?*Context,
92+ _: *xev.Loop,
93+ completion: *xev.Completion,
94+ _: xev.Stream,
95+ _: xev.WriteBuffer,
96+ write_result: xev.WriteError!usize,
97+) xev.CallbackAction {
98+ const ctx = ctx_opt.?;
99+ if (write_result) |_| {
100+ // Request sent successfully
101+ } else |err| {
102+ std.debug.print("write failed: {s}\n", .{@errorName(err)});
103+ return cleanup(ctx, completion);
104+ }
105+
106+ // Now read the response
107+ const read_ctx = ctx.allocator.create(ReadContext) catch @panic("failed to create read context");
108+ read_ctx.* = .{
109+ .ctx = ctx,
110+ .buffer = undefined,
111+ };
112+
113+ const read_completion = ctx.allocator.create(xev.Completion) catch @panic("failed to create completion");
114+
115+ // Track read completion and context for cleanup
116+ ctx.read_completion = read_completion;
117+ ctx.read_ctx = read_ctx;
118+
119+ ctx.stream.read(ctx.loop, read_completion, .{ .slice = &read_ctx.buffer }, ReadContext, read_ctx, readCallback);
120+
121+ ctx.allocator.destroy(completion);
122+ return .disarm;
123+}
124+
125+const ReadContext = struct {
126+ ctx: *Context,
127+ buffer: [4096]u8,
128+};
129+
130+fn readCallback(
131+ read_ctx_opt: ?*ReadContext,
132+ _: *xev.Loop,
133+ completion: *xev.Completion,
134+ _: xev.Stream,
135+ read_buffer: xev.ReadBuffer,
136+ read_result: xev.ReadError!usize,
137+) xev.CallbackAction {
138+ const read_ctx = read_ctx_opt.?;
139+ const ctx = read_ctx.ctx;
140+
141+ if (read_result) |len| {
142+ if (len == 0) {
143+ std.debug.print("Server closed connection\n", .{});
144+ return cleanup(ctx, completion);
145+ }
146+
147+ const data = read_buffer.slice[0..len];
148+
149+ // Find a newline to get a complete message. Note: this assumes at
+ // most one message per read; any bytes after the first newline are dropped.
150+ const newline_idx = std.mem.indexOf(u8, data, "\n") orelse {
151+ // std.debug.print("No newline found in {d} bytes, waiting for more data\n", .{len});
152+ return .rearm;
153+ };
154+
155+ const msg_line = data[0..newline_idx];
156+ // std.debug.print("Parsing message ({d} bytes): {s}\n", .{msg_line.len, msg_line});
157+
158+ const parsed = std.json.parseFromSlice(
159+ std.json.Value,
160+ ctx.allocator,
161+ msg_line,
162+ .{},
163+ ) catch |err| {
164+ std.debug.print("JSON parse error: {s}\n", .{@errorName(err)});
165+ return .rearm;
166+ };
167+ defer parsed.deinit();
168+
169+ const root = parsed.value.object;
170+ const msg_type = root.get("type").?.string;
171+ const payload = root.get("payload").?.object;
172+
173+ if (std.mem.eql(u8, msg_type, "attach_session_response")) {
174+ const status = payload.get("status").?.string;
175+ if (std.mem.eql(u8, status, "ok")) {
176+ // Get client_fd from response
177+ const client_fd = payload.get("client_fd").?.integer;
178+
179+ // Write client_fd to a file so shell commands can read it
180+ const home_dir = posix.getenv("HOME") orelse "/tmp";
181+ const client_fd_path = std.fmt.allocPrint(
182+ ctx.allocator,
183+ "{s}/.zmx_client_fd_{s}",
184+ .{ home_dir, ctx.session_name },
185+ ) catch |err| {
186+ std.debug.print("Failed to create client_fd path: {s}\n", .{@errorName(err)});
187+ return .rearm;
188+ };
189+ defer ctx.allocator.free(client_fd_path);
190+
191+ const file = std.fs.cwd().createFile(client_fd_path, .{ .truncate = true }) catch |err| {
192+ std.debug.print("Failed to create client_fd file: {s}\n", .{@errorName(err)});
193+ return .rearm;
194+ };
195+ defer file.close();
196+
197+ const fd_str = std.fmt.allocPrint(ctx.allocator, "{d}", .{client_fd}) catch return .rearm;
198+ defer ctx.allocator.free(fd_str);
199+
200+ file.writeAll(fd_str) catch |err| {
201+ std.debug.print("Failed to write client_fd: {s}\n", .{@errorName(err)});
202+ return .rearm;
203+ };
204+
205+ startStdinReading(ctx);
206+ } else {
207+ _ = posix.write(posix.STDERR_FILENO, "Attach failed: ") catch {};
208+ _ = posix.write(posix.STDERR_FILENO, status) catch {};
209+ _ = posix.write(posix.STDERR_FILENO, "\n") catch {};
210+ }
211+ } else if (std.mem.eql(u8, msg_type, "detach_session_response")) {
212+ const status = payload.get("status").?.string;
213+ if (std.mem.eql(u8, status, "ok")) {
214+ cleanupClientFdFile(ctx);
215+ _ = posix.write(posix.STDERR_FILENO, "\r\nDetached from session\r\n") catch {};
216+ return cleanup(ctx, completion);
217+ }
218+ } else if (std.mem.eql(u8, msg_type, "detach_notification")) {
219+ cleanupClientFdFile(ctx);
220+ _ = posix.write(posix.STDERR_FILENO, "\r\nDetached from session (external request)\r\n") catch {};
221+ return cleanup(ctx, completion);
222+ } else if (std.mem.eql(u8, msg_type, "kill_notification")) {
223+ cleanupClientFdFile(ctx);
224+ _ = posix.write(posix.STDERR_FILENO, "\r\nSession killed\r\n") catch {};
225+ return cleanup(ctx, completion);
226+ } else if (std.mem.eql(u8, msg_type, "pty_out")) {
227+ const text = payload.get("text").?.string;
228+ _ = posix.write(posix.STDOUT_FILENO, text) catch {};
229+ } else {
230+ std.debug.print("Unknown message type: {s}\n", .{msg_type});
231+ }
232+
233+ return .rearm;
234+ } else |err| {
235+ std.debug.print("read failed: {s}\n", .{@errorName(err)});
236+ }
237+
+ // read_ctx is tracked as ctx.read_ctx and freed inside cleanup();
+ // destroying it here as well would be a double free.
239+ return cleanup(ctx, completion);
240+}
241+
242+fn startStdinReading(ctx: *Context) void {
243+ const stdin_ctx = ctx.allocator.create(StdinContext) catch @panic("failed to create stdin context");
244+ stdin_ctx.* = .{
245+ .ctx = ctx,
246+ .buffer = undefined,
247+ };
248+
249+ const stdin_completion = ctx.allocator.create(xev.Completion) catch @panic("failed to create completion");
250+
251+ // Track stdin completion and context for cleanup
252+ ctx.stdin_completion = stdin_completion;
253+ ctx.stdin_ctx = stdin_ctx;
254+
255+ ctx.stdin_stream.read(ctx.loop, stdin_completion, .{ .slice = &stdin_ctx.buffer }, StdinContext, stdin_ctx, stdinReadCallback);
256+}
257+
258+const StdinContext = struct {
259+ ctx: *Context,
260+ buffer: [4096]u8,
261+};
262+
263+fn cleanupClientFdFile(ctx: *Context) void {
264+ const home_dir = posix.getenv("HOME") orelse "/tmp";
265+ const client_fd_path = std.fmt.allocPrint(
266+ ctx.allocator,
267+ "{s}/.zmx_client_fd_{s}",
268+ .{ home_dir, ctx.session_name },
269+ ) catch return;
270+ defer ctx.allocator.free(client_fd_path);
271+
272+ std.fs.cwd().deleteFile(client_fd_path) catch {};
273+}
274+
275+fn sendDetachRequest(ctx: *Context) void {
276+ const request = std.fmt.allocPrint(
277+ ctx.allocator,
278+ "{{\"type\":\"detach_session_request\",\"payload\":{{\"session_name\":\"{s}\"}}}}\n",
279+ .{ctx.session_name},
280+ ) catch return;
281+ defer ctx.allocator.free(request);
282+
283+ const write_ctx = ctx.allocator.create(StdinWriteContext) catch return;
284+ write_ctx.* = .{
285+ .allocator = ctx.allocator,
286+ .message = ctx.allocator.dupe(u8, request) catch return,
287+ };
288+
289+ const write_completion = ctx.allocator.create(xev.Completion) catch return;
290+ ctx.stream.write(ctx.loop, write_completion, .{ .slice = write_ctx.message }, StdinWriteContext, write_ctx, stdinWriteCallback);
291+}
292+
+// Wrap raw terminal input in a `pty_in` protocol message, manually
+// JSON-escaping each byte so control characters survive serialization.
293+fn sendPtyInput(ctx: *Context, data: []const u8) void {
294+ var msg_buf = std.ArrayList(u8).initCapacity(ctx.allocator, 4096) catch return;
295+ defer msg_buf.deinit(ctx.allocator);
296+
297+ msg_buf.appendSlice(ctx.allocator, "{\"type\":\"pty_in\",\"payload\":{\"text\":\"") catch return;
298+
299+ for (data) |byte| {
300+ switch (byte) {
301+ '"' => msg_buf.appendSlice(ctx.allocator, "\\\"") catch return,
302+ '\\' => msg_buf.appendSlice(ctx.allocator, "\\\\") catch return,
303+ '\n' => msg_buf.appendSlice(ctx.allocator, "\\n") catch return,
304+ '\r' => msg_buf.appendSlice(ctx.allocator, "\\r") catch return,
305+ '\t' => msg_buf.appendSlice(ctx.allocator, "\\t") catch return,
306+ 0x08 => msg_buf.appendSlice(ctx.allocator, "\\b") catch return,
307+ 0x0C => msg_buf.appendSlice(ctx.allocator, "\\f") catch return,
308+ 0x00...0x07, 0x0B, 0x0E...0x1F, 0x7F => {
309+ const escaped = std.fmt.allocPrint(ctx.allocator, "\\u{x:0>4}", .{byte}) catch return;
310+ defer ctx.allocator.free(escaped);
311+ msg_buf.appendSlice(ctx.allocator, escaped) catch return;
312+ },
313+ else => msg_buf.append(ctx.allocator, byte) catch return,
314+ }
315+ }
316+
317+ msg_buf.appendSlice(ctx.allocator, "\"}}\n") catch return;
318+
319+ const owned_message = ctx.allocator.dupe(u8, msg_buf.items) catch return;
320+
321+ const write_ctx = ctx.allocator.create(StdinWriteContext) catch return;
322+ write_ctx.* = .{
323+ .allocator = ctx.allocator,
324+ .message = owned_message,
325+ };
326+
327+ const write_completion = ctx.allocator.create(xev.Completion) catch return;
328+ ctx.stream.write(ctx.loop, write_completion, .{ .slice = owned_message }, StdinWriteContext, write_ctx, stdinWriteCallback);
329+}
330+
331+const StdinWriteContext = struct {
332+ allocator: std.mem.Allocator,
333+ message: []u8,
334+};
335+
336+fn stdinReadCallback(
337+ stdin_ctx_opt: ?*StdinContext,
338+ _: *xev.Loop,
339+ completion: *xev.Completion,
340+ _: xev.Stream,
341+ read_buffer: xev.ReadBuffer,
342+ read_result: xev.ReadError!usize,
343+) xev.CallbackAction {
344+ const stdin_ctx = stdin_ctx_opt.?;
345+ const ctx = stdin_ctx.ctx;
346+
347+ if (read_result) |len| {
348+ if (len == 0) {
349+ std.debug.print("stdin closed\n", .{});
350+ ctx.stdin_completion = null;
351+ ctx.stdin_ctx = null;
352+ ctx.allocator.destroy(stdin_ctx);
353+ ctx.allocator.destroy(completion);
354+ return .disarm;
355+ }
356+
357+ const data = read_buffer.slice[0..len];
358+
359+ // Detect Ctrl-b (0x02) as prefix for detach command
360+ if (len == 1 and data[0] == 0x02) {
361+ ctx.prefix_pressed = true;
362+ return .rearm;
363+ }
364+
365+ // If prefix was pressed and now we got 'd', detach
366+ if (ctx.prefix_pressed and len == 1 and data[0] == 'd') {
367+ ctx.prefix_pressed = false;
368+ sendDetachRequest(ctx);
369+ return .rearm;
370+ }
371+
372+ // If prefix was pressed but we got something else, send the prefix and the new data
373+ if (ctx.prefix_pressed) {
374+ ctx.prefix_pressed = false;
375+ // Send the Ctrl-b that was buffered
376+ const prefix_data = [_]u8{0x02};
377+ sendPtyInput(ctx, &prefix_data);
378+ // Fall through to send the current data
379+ }
380+
381+ sendPtyInput(ctx, data);
382+
383+ return .rearm;
384+ } else |err| {
385+ std.debug.print("stdin read failed: {s}\n", .{@errorName(err)});
386+ ctx.stdin_completion = null;
387+ ctx.stdin_ctx = null;
388+ ctx.allocator.destroy(stdin_ctx);
389+ ctx.allocator.destroy(completion);
390+ return .disarm;
391+ }
392+}
393+
394+fn stdinWriteCallback(
395+ write_ctx_opt: ?*StdinWriteContext,
396+ _: *xev.Loop,
397+ completion: *xev.Completion,
398+ _: xev.Stream,
399+ _: xev.WriteBuffer,
400+ write_result: xev.WriteError!usize,
401+) xev.CallbackAction {
402+ const write_ctx = write_ctx_opt.?;
403+
404+ if (write_result) |_| {
405+ // Successfully sent stdin to daemon
406+ } else |err| {
407+ std.debug.print("Failed to send stdin to daemon: {s}\n", .{@errorName(err)});
408+ }
409+
410+ // Clean up - save allocator before destroying write_ctx
411+ const allocator = write_ctx.allocator;
412+ allocator.free(write_ctx.message);
413+ allocator.destroy(write_ctx);
414+ allocator.destroy(completion);
415+ return .disarm;
416+}
417+
418+fn cleanup(ctx: *Context, completion: *xev.Completion) xev.CallbackAction {
419+ // Track whether we've freed the passed completion
420+ var completion_freed = false;
421+
422+ // Clean up stdin completion and context if they exist
423+ if (ctx.stdin_completion) |stdin_completion| {
424+ if (stdin_completion == completion) {
425+ completion_freed = true;
426+ }
427+ ctx.allocator.destroy(stdin_completion);
428+ ctx.stdin_completion = null;
429+ }
430+ if (ctx.stdin_ctx) |stdin_ctx| {
431+ ctx.allocator.destroy(stdin_ctx);
432+ ctx.stdin_ctx = null;
433+ }
434+
435+ // Clean up read completion and context if they exist
436+ if (ctx.read_completion) |read_completion| {
437+ if (read_completion == completion) {
438+ completion_freed = true;
439+ }
440+ ctx.allocator.destroy(read_completion);
441+ ctx.read_completion = null;
442+ }
443+ if (ctx.read_ctx) |read_ctx| {
444+ ctx.allocator.destroy(read_ctx);
445+ ctx.read_ctx = null;
446+ }
447+
448+ const close_completion = ctx.allocator.create(xev.Completion) catch @panic("failed to create completion");
449+ ctx.stream.close(ctx.loop, close_completion, Context, ctx, closeCallback);
450+
451+ // Only destroy completion if we haven't already freed it above
452+ if (!completion_freed) {
453+ ctx.allocator.destroy(completion);
454+ }
455+
456+ return .disarm;
457+}
458+
459+fn closeCallback(
460+ ctx_opt: ?*Context,
461+ loop: *xev.Loop,
462+ completion: *xev.Completion,
463+ _: xev.Stream,
464+ close_result: xev.CloseError!void,
465+) xev.CallbackAction {
466+ const ctx = ctx_opt.?;
467+ if (close_result) |_| {
468+ std.debug.print("Connection closed\n", .{});
469+ } else |err| {
470+ std.debug.print("close failed: {s}\n", .{@errorName(err)});
471+ }
472+ ctx.allocator.destroy(completion);
473+ ctx.allocator.destroy(ctx);
474+ loop.stop();
475+ return .disarm;
476+}
+55, -0
1@@ -0,0 +1,55 @@
2+const std = @import("std");
3+const clap = @import("clap");
4+
5+const SubCommands = enum {
6+ help,
7+ daemon,
8+ list,
9+ attach,
10+ detach,
11+ kill,
12+};
13+
14+const main_parsers = .{
15+ .command = clap.parsers.enumeration(SubCommands),
16+};
17+
18+// The parameters for `main`. Parameters for the subcommands are specified further down.
19+const main_params = clap.parseParamsComptime(
20+ \\-h, --help Display this help message and exit
21+ \\-v, --version Display version information and exit
22+ \\<command>
23+ \\
24+);
25+
26+// To pass around arguments returned by clap, `clap.Result` and `clap.ResultEx` can be used to
27+// get the return type of `clap.parse` and `clap.parseEx`.
28+const MainArgs = clap.ResultEx(clap.Help, &main_params, main_parsers);
29+
30+pub fn help() !void {
31+ var buf: [1024]u8 = undefined;
32+ var stderr_writer = std.fs.File.stderr().writer(&buf);
33+ const writer: *std.Io.Writer = &stderr_writer.interface;
34+ try clap.help(writer, clap.Help, &main_params, .{});
35+}
36+
37+pub fn parse(gpa: std.mem.Allocator, iter: *std.process.ArgIterator) !MainArgs {
38+ _ = iter.next(); // Skip the executable name.
39+
40+ var diag = clap.Diagnostic{};
41+ const res = try clap.parseEx(clap.Help, &main_params, main_parsers, iter, .{
42+ .diagnostic = &diag,
43+ .allocator = gpa,
44+
45+ // Terminate the parsing of arguments after parsing the first positional (0 is passed
46+ // here because parsed positionals are, like slices and arrays, indexed starting at 0).
47+ //
48+ // This will terminate the parsing after parsing the subcommand enum and leave `iter`
49+ // not fully consumed. It can then be reused to parse the arguments for subcommands.
50+ .terminating_positional = 0,
51+ });
54+
55+ return res;
56+}
+733, -0
1@@ -0,0 +1,733 @@
2+const std = @import("std");
3+const posix = std.posix;
4+const xevg = @import("xev");
5+const xev = xevg.Dynamic;
6+const socket_path = "/tmp/zmx.sock";
7+
8+const c = @cImport({
9+ @cInclude("pty.h");
10+ @cInclude("utmp.h");
11+ @cInclude("stdlib.h");
12+});
13+
14+// Generic JSON message structure used for parsing incoming protocol messages from clients
15+const Message = struct {
16+ type: []const u8,
17+ payload: std.json.Value,
18+};
19+
20+// Request payload for attaching to a session
21+const AttachRequest = struct {
22+ session_name: []const u8,
23+};
24+
25+// A PTY session that manages a persistent shell process
26+// Stores the PTY master file descriptor, shell process PID, scrollback buffer,
27+// and a read buffer for async I/O with libxev
28+const Session = struct {
29+ name: []const u8,
30+ pty_master_fd: std.posix.fd_t,
31+ buffer: std.ArrayList(u8),
32+ child_pid: std.posix.pid_t,
33+ allocator: std.mem.Allocator,
34+ pty_read_buffer: [4096]u8,
35+ created_at: i64,
36+
37+ fn deinit(self: *Session) void {
38+ self.allocator.free(self.name);
39+ self.buffer.deinit(self.allocator);
40+ }
41+};
42+
43+// A connected client that communicates with the daemon over a Unix socket
44+// Tracks the client's file descriptor, async stream for I/O, read buffer,
45+// and which session (if any) the client is currently attached to
46+const Client = struct {
47+ fd: std.posix.fd_t,
48+ stream: xev.Stream,
49+ read_buffer: [4096]u8,
50+ allocator: std.mem.Allocator,
51+ attached_session: ?[]const u8,
52+ server_ctx: *ServerContext,
53+};
54+
55+// Main daemon server state that manages the event loop, Unix socket server,
56+// all active client connections, and all persistent PTY sessions
57+const ServerContext = struct {
58+ loop: *xev.Loop,
59+ server_fd: std.posix.fd_t,
60+ accept_completion: xev.Completion,
61+ clients: std.AutoHashMap(std.posix.fd_t, *Client),
62+ sessions: std.StringHashMap(*Session),
63+ allocator: std.mem.Allocator,
64+};
65+
66+pub fn main() !void {
67+ var gpa = std.heap.GeneralPurposeAllocator(.{}){};
68+ defer _ = gpa.deinit();
69+ const allocator = gpa.allocator();
70+
71+ var thread_pool = xevg.ThreadPool.init(.{});
72+ defer thread_pool.deinit();
73+ defer thread_pool.shutdown();
74+
75+ var loop = try xev.Loop.init(.{ .thread_pool = &thread_pool });
76+ defer loop.deinit();
77+
78+ std.debug.print("zmx daemon starting...\n", .{});
79+
80+ _ = std.fs.cwd().deleteFile(socket_path) catch {};
81+
82+ // AF.UNIX: Unix domain socket for local IPC with client processes
83+ // SOCK.STREAM: Reliable, bidirectional communication for JSON protocol messages
84+ // SOCK.NONBLOCK: Prevents blocking to work with libxev's async event loop
85+ const server_fd = try posix.socket(posix.AF.UNIX, posix.SOCK.STREAM | posix.SOCK.NONBLOCK, 0);
86+ defer posix.close(server_fd);
87+
88+ var unix_addr = std.net.Address.initUnix(socket_path) catch |err| {
89+ std.debug.print("initUnix failed: {s}\n", .{@errorName(err)});
90+ return err;
91+ };
92+ try posix.bind(server_fd, &unix_addr.any, unix_addr.getOsSockLen());
93+ try posix.listen(server_fd, 128);
94+
95+ var server_stream = xev.Stream.initFd(server_fd);
96+ var server_context = ServerContext{
97+ .loop = &loop,
98+ .server_fd = server_fd,
99+ .accept_completion = .{},
100+ .clients = std.AutoHashMap(std.posix.fd_t, *Client).init(allocator),
101+ .sessions = std.StringHashMap(*Session).init(allocator),
102+ .allocator = allocator,
103+ };
104+ defer server_context.clients.deinit();
105+ defer {
106+ var it = server_context.sessions.valueIterator();
107+ while (it.next()) |session| {
108+ session.*.deinit();
109+ allocator.destroy(session.*);
110+ }
111+ server_context.sessions.deinit();
112+ }
113+
114+ server_stream.poll(
115+ &loop,
116+ &server_context.accept_completion,
117+ .read,
118+ ServerContext,
119+ &server_context,
120+ acceptCallback,
121+ );
122+
123+ try loop.run(.until_done);
124+}
125+
126+fn acceptCallback(
127+ ctx_opt: ?*ServerContext,
128+ _: *xev.Loop,
129+ _: *xev.Completion,
130+ _: xev.Stream,
131+ poll_result: xev.PollError!xev.PollEvent,
132+) xev.CallbackAction {
133+ const ctx = ctx_opt.?;
134+ if (poll_result) |_| {
135+ while (true) {
136+ // SOCK.CLOEXEC: Close socket on exec to prevent child PTY processes from inheriting client connections
137+ // SOCK.NONBLOCK: Make client socket non-blocking for async I/O
138+ const client_fd = posix.accept(ctx.server_fd, null, null, posix.SOCK.CLOEXEC | posix.SOCK.NONBLOCK) catch |err| {
139+ if (err == error.WouldBlock) {
140+ // No more pending connections
141+ break;
142+ }
143+ std.debug.print("accept failed: {s}\n", .{@errorName(err)});
144+ return .disarm; // Stop polling on error
145+ };
146+
147+ const client = ctx.allocator.create(Client) catch @panic("failed to create client");
148+ client.* = .{
149+ .fd = client_fd,
150+ .stream = xev.Stream.initFd(client_fd),
151+ .read_buffer = undefined,
152+ .allocator = ctx.allocator,
153+ .attached_session = null,
154+ .server_ctx = ctx,
155+ };
156+
157+ ctx.clients.put(client_fd, client) catch @panic("failed to add client");
158+ std.debug.print("new client connected fd={d}\n", .{client_fd});
159+
160+ const read_completion = ctx.allocator.create(xev.Completion) catch @panic("failed to create completion");
161+ client.stream.read(ctx.loop, read_completion, .{ .slice = &client.read_buffer }, Client, client, readCallback);
162+ }
163+ } else |err| {
164+ std.debug.print("poll failed: {s}\n", .{@errorName(err)});
165+ }
166+
167+ // Re-arm the poll
168+ return .rearm;
169+}
170+
171+fn readCallback(
172+ client_opt: ?*Client,
173+ loop: *xev.Loop,
174+ completion: *xev.Completion,
175+ _: xev.Stream,
176+ read_buffer: xev.ReadBuffer,
177+ read_result: xev.ReadError!usize,
178+) xev.CallbackAction {
179+ _ = loop;
180+ const client = client_opt.?;
181+ if (read_result) |len| {
182+ if (len == 0) {
183+ return closeClient(client, completion);
184+ }
185+ const data = read_buffer.slice[0..len];
186+ handleMessage(client, data) catch |err| {
187+ std.debug.print("handleMessage failed: {s}\n", .{@errorName(err)});
188+ return closeClient(client, completion);
189+ };
190+
191+ return .rearm;
192+ } else |err| {
193+ if (err == error.EndOfStream or err == error.EOF) {
194+ return closeClient(client, completion);
195+ }
196+ std.debug.print("read failed: {s}\n", .{@errorName(err)});
197+ return closeClient(client, completion);
198+ }
199+}
200+
201+fn handleMessage(client: *Client, data: []const u8) !void {
202+ std.debug.print("Received message from client fd={d}: {s}", .{ client.fd, data });
203+
204+ // Parse JSON message
205+ const parsed = try std.json.parseFromSlice(
206+ std.json.Value,
207+ client.allocator,
208+ data,
209+ .{},
210+ );
211+ defer parsed.deinit();
212+
213+ const root = parsed.value.object;
214+ const msg_type = root.get("type").?.string;
215+ const payload = root.get("payload").?.object;
216+
217+ if (std.mem.eql(u8, msg_type, "attach_session_request")) {
218+ const session_name = payload.get("session_name").?.string;
219+ std.debug.print("Handling attach request for session: {s}\n", .{session_name});
220+ try handleAttachSession(client.server_ctx, client, session_name);
221+ } else if (std.mem.eql(u8, msg_type, "detach_session_request")) {
222+ const session_name = payload.get("session_name").?.string;
223+ const target_client_fd = if (payload.get("client_fd")) |fd_value| fd_value.integer else null;
224+ std.debug.print("Handling detach request for session: {s}, target_fd: {any}\n", .{ session_name, target_client_fd });
225+ try handleDetachSession(client, session_name, target_client_fd);
226+ } else if (std.mem.eql(u8, msg_type, "kill_session_request")) {
227+ const session_name = payload.get("session_name").?.string;
228+ std.debug.print("Handling kill request for session: {s}\n", .{session_name});
229+ try handleKillSession(client, session_name);
230+ } else if (std.mem.eql(u8, msg_type, "list_sessions_request")) {
231+ std.debug.print("Handling list sessions request\n", .{});
232+ try handleListSessions(client.server_ctx, client);
233+ } else if (std.mem.eql(u8, msg_type, "pty_in")) {
234+ const text = payload.get("text").?.string;
235+ try handlePtyInput(client, text);
236+ } else {
237+ std.debug.print("Unknown message type: {s}\n", .{msg_type});
238+ }
239+}
240+
241+fn handleDetachSession(client: *Client, session_name: []const u8, target_client_fd: ?i64) !void {
242+ const ctx = client.server_ctx;
243+
244+ // Check if the session exists
245+ if (!ctx.sessions.contains(session_name)) {
246+ const error_response = try std.fmt.allocPrint(
247+ client.allocator,
248+ "{{\"type\":\"detach_session_response\",\"payload\":{{\"status\":\"error\",\"error_message\":\"Session not found: {s}\"}}}}\n",
249+ .{session_name},
250+ );
251+ defer client.allocator.free(error_response);
252+
253+ _ = posix.write(client.fd, error_response) catch |err| {
254+ std.debug.print("Error writing to fd={d}: {s}\n", .{ client.fd, @errorName(err) });
255+ return err;
256+ };
257+ return;
258+ }
259+
260+ // If target_client_fd is provided, find and detach that specific client
261+ if (target_client_fd) |target_fd| {
262+ const target_fd_cast: std.posix.fd_t = @intCast(target_fd);
263+ if (ctx.clients.get(target_fd_cast)) |target_client| {
264+ if (target_client.attached_session) |attached| {
265+ if (std.mem.eql(u8, attached, session_name)) {
266+ target_client.attached_session = null;
267+
268+ // Send notification to the target client
269+ const notification = "{\"type\":\"detach_notification\",\"payload\":{\"status\":\"ok\"}}\n";
270+ _ = posix.write(target_client.fd, notification) catch |err| {
271+ std.debug.print("Error notifying client fd={d}: {s}\n", .{ target_client.fd, @errorName(err) });
272+ };
273+
274+ // Send response to the requesting client
275+ const response = "{\"type\":\"detach_session_response\",\"payload\":{\"status\":\"ok\"}}\n";
276+ std.debug.print("Detached client fd={d} from session: {s}\n", .{ target_fd_cast, session_name });
277+
278+ _ = posix.write(client.fd, response) catch |err| {
279+ std.debug.print("Error writing to fd={d}: {s}\n", .{ client.fd, @errorName(err) });
280+ return err;
281+ };
282+ return;
283+ } else {
284+ const error_response = try std.fmt.allocPrint(
285+ client.allocator,
286+ "{{\"type\":\"detach_session_response\",\"payload\":{{\"status\":\"error\",\"error_message\":\"Target client not attached to session: {s}\"}}}}\n",
287+ .{session_name},
288+ );
289+ defer client.allocator.free(error_response);
290+
291+ _ = posix.write(client.fd, error_response) catch |err| {
292+ std.debug.print("Error writing to fd={d}: {s}\n", .{ client.fd, @errorName(err) });
293+ return err;
294+ };
295+ return;
296+ }
297+ }
298+ }
299+
300+ const error_response = try std.fmt.allocPrint(
301+ client.allocator,
302+ "{{\"type\":\"detach_session_response\",\"payload\":{{\"status\":\"error\",\"error_message\":\"Target client fd={d} not found\"}}}}\n",
303+ .{target_fd},
304+ );
305+ defer client.allocator.free(error_response);
306+
307+ _ = posix.write(client.fd, error_response) catch |err| {
308+ std.debug.print("Error writing to fd={d}: {s}\n", .{ client.fd, @errorName(err) });
309+ return err;
310+ };
311+ return;
312+ }
313+
314+ // No target_client_fd provided, check if requesting client is attached
315+ if (client.attached_session) |attached| {
316+ if (!std.mem.eql(u8, attached, session_name)) {
317+ const error_response = try std.fmt.allocPrint(
318+ client.allocator,
319+ "{{\"type\":\"detach_session_response\",\"payload\":{{\"status\":\"error\",\"error_message\":\"Not attached to session: {s}\"}}}}\n",
320+ .{session_name},
321+ );
322+ defer client.allocator.free(error_response);
323+
324+ _ = posix.write(client.fd, error_response) catch |err| {
325+ std.debug.print("Error writing to fd={d}: {s}\n", .{ client.fd, @errorName(err) });
326+ return err;
327+ };
328+ return;
329+ }
330+
331+ client.attached_session = null;
332+ const response = "{\"type\":\"detach_session_response\",\"payload\":{\"status\":\"ok\"}}\n";
333+ std.debug.print("Sending detach response to client fd={d}: {s}", .{ client.fd, response });
334+
335+ _ = posix.write(client.fd, response) catch |err| {
336+ std.debug.print("Error writing to fd={d}: {s}\n", .{ client.fd, @errorName(err) });
337+ return err;
338+ };
339+ } else {
340+ const error_response = "{\"type\":\"detach_session_response\",\"payload\":{\"status\":\"error\",\"error_message\":\"Not attached to any session\"}}\n";
341+ _ = posix.write(client.fd, error_response) catch |err| {
342+ std.debug.print("Error writing to fd={d}: {s}\n", .{ client.fd, @errorName(err) });
343+ return err;
344+ };
345+ }
346+}
347+
348+fn handleKillSession(client: *Client, session_name: []const u8) !void {
349+ const ctx = client.server_ctx;
350+
351+ // Check if the session exists
352+ const session = ctx.sessions.get(session_name) orelse {
353+ const error_response = try std.fmt.allocPrint(
354+ client.allocator,
355+ "{{\"type\":\"kill_session_response\",\"payload\":{{\"status\":\"error\",\"error_message\":\"Session not found: {s}\"}}}}\n",
356+ .{session_name},
357+ );
358+ defer client.allocator.free(error_response);
359+
360+ _ = posix.write(client.fd, error_response) catch |err| {
361+ std.debug.print("Error writing to fd={d}: {s}\n", .{ client.fd, @errorName(err) });
362+ return err;
363+ };
364+ return;
365+ };
366+
367+ // Notify all attached clients to exit
368+ var client_it = ctx.clients.iterator();
369+ while (client_it.next()) |entry| {
370+ const attached_client = entry.value_ptr.*;
371+ if (attached_client.attached_session) |attached| {
372+ if (std.mem.eql(u8, attached, session_name)) {
373+ attached_client.attached_session = null;
374+
375+ // Send kill notification to client
376+ const notification = "{\"type\":\"kill_notification\",\"payload\":{\"status\":\"ok\"}}\n";
377+ _ = posix.write(attached_client.fd, notification) catch |err| {
378+ std.debug.print("Error notifying client fd={d}: {s}\n", .{ attached_client.fd, @errorName(err) });
379+ };
380+ }
381+ }
382+ }
383+
384+ // Kill the PTY process
385+ const kill_result = posix.kill(session.child_pid, posix.SIG.TERM);
386+ if (kill_result) |_| {
387+ std.debug.print("Sent SIGTERM to PID {d}\n", .{session.child_pid});
388+ } else |err| {
389+ std.debug.print("Error killing PID {d}: {s}\n", .{ session.child_pid, @errorName(err) });
390+ }
391+
392+ // Close PTY master fd
393+ posix.close(session.pty_master_fd);
394+
395+ // Remove from sessions map BEFORE cleaning up (session.deinit frees session.name)
396+ _ = ctx.sessions.remove(session_name);
397+
398+ // Clean up session
399+ session.deinit();
400+ ctx.allocator.destroy(session);
401+
402+ // Send response to requesting client
403+ const response = "{\"type\":\"kill_session_response\",\"payload\":{\"status\":\"ok\"}}\n";
404+ std.debug.print("Killed session: {s}\n", .{session_name});
405+
406+ _ = posix.write(client.fd, response) catch |err| {
407+ std.debug.print("Error writing to fd={d}: {s}\n", .{ client.fd, @errorName(err) });
408+ return err;
409+ };
410+}
411+
412+fn handleListSessions(ctx: *ServerContext, client: *Client) !void {
413+ var response = try std.ArrayList(u8).initCapacity(client.allocator, 1024);
414+ defer response.deinit(client.allocator);
415+
416+ try response.appendSlice(client.allocator, "{\"type\":\"list_sessions_response\",\"payload\":{\"status\":\"ok\",\"sessions\":[");
417+
418+ var it = ctx.sessions.iterator();
419+ var first = true;
420+ while (it.next()) |entry| {
421+ const session = entry.value_ptr.*;
422+
423+ if (!first) {
424+ try response.append(client.allocator, ',');
425+ }
426+ first = false;
427+
428+ var clients_count: i64 = 0;
429+ var client_it = ctx.clients.iterator();
430+ while (client_it.next()) |client_entry| {
431+ const attached_client = client_entry.value_ptr.*;
432+ if (attached_client.attached_session) |attached| {
433+ if (std.mem.eql(u8, attached, session.name)) {
434+ clients_count += 1;
435+ }
436+ }
437+ }
438+
439+ const status = if (clients_count > 0) "attached" else "detached";
440+
441+ const epoch_seconds = std.time.epoch.EpochSeconds{ .secs = @intCast(session.created_at) };
442+ const day_seconds = epoch_seconds.getDaySeconds();
443+ const year_day = epoch_seconds.getEpochDay().calculateYearDay();
444+ const month_day = year_day.calculateMonthDay();
445+
446+ const session_json = try std.fmt.allocPrint(
447+ client.allocator,
448+ "{{\"name\":\"{s}\",\"status\":\"{s}\",\"clients\":{d},\"created_at\":\"{d:0>4}-{d:0>2}-{d:0>2}T{d:0>2}:{d:0>2}:{d:0>2}Z\"}}",
449+ .{ session.name, status, clients_count, year_day.year, month_day.month.numeric(), month_day.day_index + 1, day_seconds.getHoursIntoDay(), day_seconds.getMinutesIntoHour(), day_seconds.getSecondsIntoMinute() },
450+ );
451+ defer client.allocator.free(session_json);
452+
453+ try response.appendSlice(client.allocator, session_json);
454+ }
455+
456+ try response.appendSlice(client.allocator, "]}}\n");
457+
458+ std.debug.print("Sending list response to client fd={d}: {s}", .{ client.fd, response.items });
459+
460+ const written = posix.write(client.fd, response.items) catch |err| {
461+ std.debug.print("Error writing to fd={d}: {s}\n", .{ client.fd, @errorName(err) });
462+ return err;
463+ };
464+ _ = written;
465+}
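The `created_at` formatting above leans on `std.time.epoch` inline; extracted into a standalone helper it becomes unit testable. A sketch using the same calls as `handleListSessions` (`formatTimestamp` is a hypothetical name, not in this commit):

```zig
const std = @import("std");

// Hypothetical standalone version of the created_at conversion above:
// epoch seconds to an ISO-8601 UTC string.
fn formatTimestamp(allocator: std.mem.Allocator, secs: u64) ![]u8 {
    const epoch_seconds = std.time.epoch.EpochSeconds{ .secs = secs };
    const day_seconds = epoch_seconds.getDaySeconds();
    const year_day = epoch_seconds.getEpochDay().calculateYearDay();
    const month_day = year_day.calculateMonthDay();
    return std.fmt.allocPrint(
        allocator,
        "{d:0>4}-{d:0>2}-{d:0>2}T{d:0>2}:{d:0>2}:{d:0>2}Z",
        .{ year_day.year, month_day.month.numeric(), month_day.day_index + 1, day_seconds.getHoursIntoDay(), day_seconds.getMinutesIntoHour(), day_seconds.getSecondsIntoMinute() },
    );
}

test "formatTimestamp at the epoch" {
    const s = try formatTimestamp(std.testing.allocator, 0);
    defer std.testing.allocator.free(s);
    try std.testing.expectEqualStrings("1970-01-01T00:00:00Z", s);
}
```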
466+
467+fn handleAttachSession(ctx: *ServerContext, client: *Client, session_name: []const u8) !void {
468+ // Check if session already exists
469+ if (ctx.sessions.get(session_name)) |session| {
470+ std.debug.print("Attaching to existing session: {s}\n", .{session_name});
471+ client.attached_session = session.name;
472+ try readFromPty(ctx, client, session);
473+ // TODO: Send scrollback buffer to client
474+ return;
475+ }
476+
477+ // Create new session with forkpty
478+ std.debug.print("Creating new session: {s}\n", .{session_name});
479+ const session = try createSession(ctx.allocator, session_name);
480+ try ctx.sessions.put(session.name, session);
481+ client.attached_session = session.name;
482+ try readFromPty(ctx, client, session);
483+}
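The scrollback TODO above pairs with the `session.buffer` field initialized (but never filled) in `createSession` below. One possible shape for the replay, reusing the `Session` type defined in this file (hypothetical; JSON framing as a `pty_out` message is elided for brevity):

```zig
const std = @import("std");

// Hypothetical sketch: the daemon would append PTY output to session.buffer
// as it arrives, then replay the accumulated bytes to a newly attached
// client before live streaming resumes.
fn replayScrollback(session: *Session, client_fd: std.posix.fd_t) !void {
    if (session.buffer.items.len == 0) return;
    _ = try std.posix.write(client_fd, session.buffer.items);
}
```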
484+
485+fn handlePtyInput(client: *Client, text: []const u8) !void {
486+ const session_name = client.attached_session orelse {
487+ std.debug.print("Client fd={d} not attached to any session\n", .{client.fd});
488+ return error.NotAttached;
489+ };
490+
491+ const session = client.server_ctx.sessions.get(session_name) orelse {
492+ std.debug.print("Session {s} not found\n", .{session_name});
493+ return error.SessionNotFound;
494+ };
495+
496+ std.debug.print("Writing {d} bytes to PTY fd={d}\n", .{ text.len, session.pty_master_fd });
497+
498+ // Write input to PTY master fd
499+ const written = posix.write(session.pty_master_fd, text) catch |err| {
500+ std.debug.print("Error writing to PTY: {s}\n", .{@errorName(err)});
501+ return err;
502+ };
503+ _ = written;
504+}
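Note that `posix.write` here (and in the other handlers) may report a short write on a socket or PTY, and the returned count is currently discarded. A defensive variant, sketched as a hypothetical `writeAll` helper (EAGAIN handling for non-blocking fds is left out):

```zig
const std = @import("std");
const posix = std.posix;

// Hypothetical helper: a single posix.write may accept fewer bytes than
// requested, so loop until everything is flushed.
fn writeAll(fd: posix.fd_t, bytes: []const u8) !void {
    var offset: usize = 0;
    while (offset < bytes.len) {
        offset += try posix.write(fd, bytes[offset..]);
    }
}
```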
505+
506+fn readFromPty(ctx: *ServerContext, client: *Client, session: *Session) !void {
507+ _ = ctx;
508+ const stream = xev.Stream.initFd(session.pty_master_fd);
509+ const read_compl = client.allocator.create(xev.Completion) catch @panic("failed to create completion");
510+ stream.read(
511+ client.server_ctx.loop,
512+ read_compl,
513+ .{ .slice = &session.pty_read_buffer },
514+ Client,
515+ client,
516+ readPtyCallback,
517+ );
518+
519+ const response = try std.fmt.allocPrint(
520+ client.allocator,
521+ "{{\"type\":\"attach_session_response\",\"payload\":{{\"status\":\"ok\",\"client_fd\":{d}}}}}\n",
522+ .{client.fd},
523+ );
524+ defer client.allocator.free(response);
525+
526+ std.debug.print("Sending response to client fd={d}: {s}", .{ client.fd, response });
527+
528+ const written = posix.write(client.fd, response) catch |err| {
529+ std.debug.print("Error writing to fd={d}: {s}\n", .{ client.fd, @errorName(err) });
530+ return err;
531+ };
532+ _ = written;
533+}
534+
535+fn readPtyCallback(
536+ client_opt: ?*Client,
537+ loop: *xev.Loop,
538+ completion: *xev.Completion,
539+ stream: xev.Stream,
540+ read_buffer: xev.ReadBuffer,
541+ read_result: xev.ReadError!usize,
542+) xev.CallbackAction {
543+ _ = loop;
544+ _ = completion;
545+ _ = stream;
546+ const client = client_opt.?;
547+
548+ if (read_result) |bytes_read| {
549+ if (bytes_read == 0) {
550+ std.debug.print("pty closed\n", .{});
551+ return .disarm;
552+ }
553+
554+ const data = read_buffer.slice[0..bytes_read];
555+ std.debug.print("PTY output ({d} bytes)\n", .{bytes_read});
556+
557+ // Build JSON response with properly escaped text
558+ var response_buf = std.ArrayList(u8).initCapacity(client.allocator, 4096) catch return .disarm;
559+ defer response_buf.deinit(client.allocator);
560+
561+ response_buf.appendSlice(client.allocator, "{\"type\":\"pty_out\",\"payload\":{\"text\":\"") catch return .disarm;
562+
563+ // Manually escape JSON special characters
564+ for (data) |byte| {
565+ switch (byte) {
566+ '"' => response_buf.appendSlice(client.allocator, "\\\"") catch return .disarm,
567+ '\\' => response_buf.appendSlice(client.allocator, "\\\\") catch return .disarm,
568+ '\n' => response_buf.appendSlice(client.allocator, "\\n") catch return .disarm,
569+ '\r' => response_buf.appendSlice(client.allocator, "\\r") catch return .disarm,
570+ '\t' => response_buf.appendSlice(client.allocator, "\\t") catch return .disarm,
571+ 0x08 => response_buf.appendSlice(client.allocator, "\\b") catch return .disarm,
572+ 0x0C => response_buf.appendSlice(client.allocator, "\\f") catch return .disarm,
573+ 0x00...0x07, 0x0B, 0x0E...0x1F, 0x7F...0xFF => {
574+ const escaped = std.fmt.allocPrint(client.allocator, "\\u{x:0>4}", .{byte}) catch return .disarm;
575+ defer client.allocator.free(escaped);
576+ response_buf.appendSlice(client.allocator, escaped) catch return .disarm;
577+ },
578+ else => response_buf.append(client.allocator, byte) catch return .disarm,
579+ }
580+ }
581+
582+ response_buf.appendSlice(client.allocator, "\"}}\n") catch return .disarm;
583+
584+ const response = response_buf.items;
585+ std.debug.print("Sending response to client fd={d}\n", .{client.fd});
586+
587+ // Send synchronously for now (blocking write)
588+ const written = posix.write(client.fd, response) catch |err| {
589+            std.debug.print("Error writing to fd={d}: {s}\n", .{ client.fd, @errorName(err) });
590+ return .disarm;
591+ };
592+ _ = written;
593+
594+ return .rearm;
595+ } else |err| {
596+ std.debug.print("PTY read error: {s}\n", .{@errorName(err)});
597+ return .disarm;
598+ }
600+}
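A caveat on the escaping loop above: bytes 0x80 through 0xFF are emitted as `\u00XX` escapes, which decode to Latin-1 code points, so multi-byte UTF-8 output from the PTY will be mangled on the client side. Extracted into a standalone function the logic also becomes unit testable; a sketch mirroring the loop above (`escapeJson` is a hypothetical name, not in this commit):

```zig
const std = @import("std");

// Hypothetical extraction of the escaping loop above. Note: bytes >= 0x80
// become \u00XX, which is only correct for Latin-1 data, not for multi-byte
// UTF-8 sequences.
fn escapeJson(allocator: std.mem.Allocator, out: *std.ArrayList(u8), data: []const u8) !void {
    for (data) |byte| {
        switch (byte) {
            '"' => try out.appendSlice(allocator, "\\\""),
            '\\' => try out.appendSlice(allocator, "\\\\"),
            '\n' => try out.appendSlice(allocator, "\\n"),
            '\r' => try out.appendSlice(allocator, "\\r"),
            '\t' => try out.appendSlice(allocator, "\\t"),
            0x08 => try out.appendSlice(allocator, "\\b"),
            0x0C => try out.appendSlice(allocator, "\\f"),
            0x00...0x07, 0x0B, 0x0E...0x1F, 0x7F...0xFF => {
                const escaped = try std.fmt.allocPrint(allocator, "\\u{x:0>4}", .{byte});
                defer allocator.free(escaped);
                try out.appendSlice(allocator, escaped);
            },
            else => try out.append(allocator, byte),
        }
    }
}

test "escapeJson escapes control bytes" {
    var buf = try std.ArrayList(u8).initCapacity(std.testing.allocator, 0);
    defer buf.deinit(std.testing.allocator);
    try escapeJson(std.testing.allocator, &buf, "a\"b\n\x1b");
    try std.testing.expectEqualStrings("a\\\"b\\n\\u001b", buf.items);
}
```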
601+
602+fn execShellWithPrompt(allocator: std.mem.Allocator, session_name: []const u8, shell: [*:0]const u8) noreturn {
603+ // Detect shell type and add prompt injection
604+ const shell_name = std.fs.path.basename(std.mem.span(shell));
605+
606+ if (std.mem.eql(u8, shell_name, "fish")) {
607+ // Fish: wrap the existing fish_prompt function
608+ const init_cmd = std.fmt.allocPrint(allocator, "if test -e ~/.config/fish/config.fish; source ~/.config/fish/config.fish; end; " ++
609+ "functions -q fish_prompt; and functions -c fish_prompt _zmx_original_prompt; " ++
610+ "function fish_prompt; echo -n '[{s}] '; _zmx_original_prompt; end\x00", .{session_name}) catch {
611+ std.posix.exit(1);
612+ };
613+ const argv = [_:null]?[*:0]const u8{ shell, "--init-command".ptr, @ptrCast(init_cmd.ptr), null };
614+ const err = std.posix.execveZ(shell, &argv, std.c.environ);
615+ std.debug.print("execve failed: {s}\n", .{@errorName(err)});
616+ std.posix.exit(1);
617+    } else if (std.mem.eql(u8, shell_name, "bash")) {
618+        // Bash: prepend to PS1 via PROMPT_COMMAND, which bash imports from the
619+        // environment and runs before the first prompt (after ~/.bashrc); it
620+        // unsets itself so the tag is only prepended once.
621+        const prompt_cmd = std.fmt.allocPrint(allocator, "PROMPT_COMMAND=PS1='[{s}] '$PS1; unset PROMPT_COMMAND\x00", .{session_name}) catch {
622+            std.posix.exit(1);
623+        };
624+        _ = c.putenv(@ptrCast(prompt_cmd.ptr));
625+        const argv = [_:null]?[*:0]const u8{ shell, null };
626+        const err = std.posix.execveZ(shell, &argv, std.c.environ);
627+        std.debug.print("execve failed: {s}\n", .{@errorName(err)});
628+        std.posix.exit(1);
629+    } else if (std.mem.eql(u8, shell_name, "zsh")) {
630+        // Zsh: seed PROMPT through the environment and let it prepend; a
631+        // ~/.zshrc that reassigns PROMPT outright will override the tag.
632+        const prompt_var = std.fmt.allocPrint(allocator, "PROMPT=[{s}] ${{PROMPT:-'%# '}}\x00", .{session_name}) catch {
633+            std.posix.exit(1);
634+        };
635+        _ = c.putenv(@ptrCast(prompt_var.ptr));
636+        const argv = [_:null]?[*:0]const u8{ shell, null };
637+        const err = std.posix.execveZ(shell, &argv, std.c.environ);
638+        std.debug.print("execve failed: {s}\n", .{@errorName(err)});
639+        std.posix.exit(1);
645+ } else {
646+ // Default: just run the shell
647+ const argv = [_:null]?[*:0]const u8{ shell, null };
648+ const err = std.posix.execveZ(shell, &argv, std.c.environ);
649+ std.debug.print("execve failed: {s}\n", .{@errorName(err)});
650+ std.posix.exit(1);
651+ }
652+}
653+
654+fn createSession(allocator: std.mem.Allocator, session_name: []const u8) !*Session {
655+ var master_fd: c_int = undefined;
656+
657+ // Fork and create PTY
658+ const pid = c.forkpty(&master_fd, null, null, null);
659+ if (pid < 0) {
660+ return error.ForkPtyFailed;
661+ }
662+
663+ if (pid == 0) {
664+ // Child process - set environment and execute shell with prompt
665+ const zmx_session_var = std.fmt.allocPrint(allocator, "ZMX_SESSION={s}\x00", .{session_name}) catch {
666+ std.posix.exit(1);
667+ };
668+ _ = c.putenv(@ptrCast(zmx_session_var.ptr));
669+
670+ const shell = std.posix.getenv("SHELL") orelse "/bin/sh";
671+ execShellWithPrompt(allocator, session_name, shell);
672+ }
673+
674+ // Parent process - setup session
675+ std.debug.print("✓ Created PTY session: name={s}, master_fd={d}, child_pid={d}\n", .{
676+ session_name,
677+ master_fd,
678+ pid,
679+ });
680+
681+ // Make PTY master fd non-blocking for async I/O
682+ const flags = try posix.fcntl(master_fd, posix.F.GETFL, 0);
683+    _ = try posix.fcntl(master_fd, posix.F.SETFL, flags | @as(u32, 0o4000)); // 0o4000 == O_NONBLOCK on Linux
684+
685+ const session = try allocator.create(Session);
686+ session.* = .{
687+ .name = try allocator.dupe(u8, session_name),
688+ .pty_master_fd = @intCast(master_fd),
689+ .buffer = try std.ArrayList(u8).initCapacity(allocator, 0),
690+ .child_pid = pid,
691+ .allocator = allocator,
692+ .pty_read_buffer = undefined,
693+ .created_at = std.time.timestamp(),
694+ };
695+
696+ return session;
697+}
698+
699+fn closeClient(client: *Client, completion: *xev.Completion) xev.CallbackAction {
700+ std.debug.print("Closing client fd={d}\n", .{client.fd});
701+
702+ // Remove client from the clients map
703+ _ = client.server_ctx.clients.remove(client.fd);
704+
705+ // Initiate async close of the client stream
706+ const close_completion = client.allocator.create(xev.Completion) catch {
707+ // If we can't allocate, just clean up synchronously
708+ posix.close(client.fd);
709+ client.allocator.destroy(completion);
710+ client.allocator.destroy(client);
711+ return .disarm;
712+ };
713+
714+ client.stream.close(client.server_ctx.loop, close_completion, Client, client, closeCallback);
715+ client.allocator.destroy(completion);
716+ return .disarm;
717+}
718+
719+fn closeCallback(
720+ client_opt: ?*Client,
721+ _: *xev.Loop,
722+ completion: *xev.Completion,
723+ _: xev.Stream,
724+ close_result: xev.CloseError!void,
725+) xev.CallbackAction {
726+ const client = client_opt.?;
727+ if (close_result) |_| {} else |err| {
728+ std.debug.print("close failed: {s}\n", .{@errorName(err)});
729+ }
730+ std.debug.print("client disconnected fd={d}\n", .{client.fd});
731+ client.allocator.destroy(completion);
732+ client.allocator.destroy(client);
733+ return .disarm;
734+}
+85,
-0
1@@ -0,0 +1,85 @@
2+const std = @import("std");
3+const posix = std.posix;
4+
5+test "daemon attach creates pty session" {
6+ const allocator = std.testing.allocator;
7+
8+ // Start the daemon process with SHELL=/bin/bash
9+ const daemon_args = [_][]const u8{ "zig-out/bin/zmx", "daemon" };
10+ var daemon_process = std.process.Child.init(&daemon_args, allocator);
11+ daemon_process.stdin_behavior = .Ignore;
12+ daemon_process.stdout_behavior = .Pipe;
13+ daemon_process.stderr_behavior = .Pipe;
14+
15+ // Set SHELL environment variable
16+ var env_map = try std.process.getEnvMap(allocator);
17+ defer env_map.deinit();
18+ try env_map.put("SHELL", "/bin/bash");
19+ daemon_process.env_map = &env_map;
20+
21+ try daemon_process.spawn();
22+ defer {
23+ _ = daemon_process.kill() catch {};
24+ }
25+
26+ // Give daemon time to start
27+ std.Thread.sleep(500 * std.time.ns_per_ms);
28+
29+ // Run zmx attach command
30+ const attach_args = [_][]const u8{ "zig-out/bin/zmx", "attach" };
31+ var attach_process = std.process.Child.init(&attach_args, allocator);
32+ attach_process.stdin_behavior = .Ignore;
33+ attach_process.stdout_behavior = .Pipe;
34+ attach_process.stderr_behavior = .Pipe;
35+
36+ const result = try attach_process.spawnAndWait();
37+
38+ // Check that attach command succeeded
39+ try std.testing.expectEqual(std.process.Child.Term{ .Exited = 0 }, result);
40+
41+ // Give time for daemon to process and create PTY
42+ std.Thread.sleep(100 * std.time.ns_per_ms);
43+
44+    // Verify PTY was created by reading daemon output. readToEndAlloc blocks
45+    // until EOF, which only arrives once the daemon exits, so signal it to
46+    // stop first; the deferred kill() above then reaps the process.
47+    try posix.kill(daemon_process.id, posix.SIG.TERM);
48+    const stdout = try daemon_process.stdout.?.readToEndAlloc(allocator, 1024 * 1024);
49+    defer allocator.free(stdout);
47+
48+ // Parse the child PID from daemon output
49+ const child_pid_prefix = "child_pid=";
50+ const pid_start = std.mem.indexOf(u8, stdout, child_pid_prefix) orelse return error.NoPidInOutput;
51+ const pid_str_start = pid_start + child_pid_prefix.len;
52+ const pid_str_end = std.mem.indexOfAnyPos(u8, stdout, pid_str_start, "\n ") orelse stdout.len;
53+ const pid_str = stdout[pid_str_start..pid_str_end];
54+ const child_pid = try std.fmt.parseInt(i32, pid_str, 10);
55+
56+ std.debug.print("Extracted child PID: {d}\n", .{child_pid});
57+
58+ // Verify the process exists in /proc
59+ const proc_path = try std.fmt.allocPrint(allocator, "/proc/{d}", .{child_pid});
60+ defer allocator.free(proc_path);
61+
62+ const proc_dir = std.fs.openDirAbsolute(proc_path, .{}) catch |err| {
63+ std.debug.print("Process {d} does not exist in /proc: {s}\n", .{ child_pid, @errorName(err) });
64+ return err;
65+ };
66+ proc_dir.close();
67+
68+ // Verify it's a shell process by reading /proc/<pid>/comm
69+ const comm_path = try std.fmt.allocPrint(allocator, "/proc/{d}/comm", .{child_pid});
70+ defer allocator.free(comm_path);
71+
72+ const comm = std.fs.cwd().readFileAlloc(allocator, comm_path, 1024) catch |err| {
73+ std.debug.print("Could not read process name: {s}\n", .{@errorName(err)});
74+ return err;
75+ };
76+ defer allocator.free(comm);
77+
78+ const process_name = std.mem.trim(u8, comm, "\n ");
79+ std.debug.print("Child process name: {s}\n", .{process_name});
80+
81+ // Verify it's bash (as we set SHELL=/bin/bash)
82+ try std.testing.expectEqualStrings("bash", process_name);
83+
84+ std.debug.print("✓ PTY session created successfully with bash process (PID {d})\n", .{child_pid});
85+ std.debug.print("Daemon output:\n{s}\n", .{stdout});
86+}
+110,
-0
1@@ -0,0 +1,110 @@
2+const std = @import("std");
3+const posix = std.posix;
4+
5+const socket_path = "/tmp/zmx.sock";
6+
7+pub fn main() !void {
8+ var gpa = std.heap.GeneralPurposeAllocator(.{}){};
9+ defer _ = gpa.deinit();
10+ const allocator = gpa.allocator();
11+
12+ // Find the client_fd file in home directory
13+ const home_dir = posix.getenv("HOME") orelse "/tmp";
14+
15+ var session_name: ?[]const u8 = null;
16+ var client_fd: ?i64 = null;
17+
18+ // Look for .zmx_client_fd_* files
19+ var dir = std.fs.cwd().openDir(home_dir, .{ .iterate = true }) catch {
20+ std.debug.print("Error: Cannot access home directory\n", .{});
21+ std.process.exit(1);
22+ };
23+ defer dir.close();
24+
25+ var iter = dir.iterate();
26+ while (iter.next() catch null) |entry| {
27+ if (entry.kind != .file) continue;
28+ if (!std.mem.startsWith(u8, entry.name, ".zmx_client_fd_")) continue;
29+
30+ // Extract session name from filename
31+ const name_start = ".zmx_client_fd_".len;
32+ session_name = try allocator.dupe(u8, entry.name[name_start..]);
33+
34+ // Read the client_fd from the file
35+ const full_path = try std.fs.path.join(allocator, &[_][]const u8{ home_dir, entry.name });
36+ defer allocator.free(full_path);
37+
38+ if (std.fs.cwd().openFile(full_path, .{})) |file| {
39+ defer file.close();
40+ var buf: [32]u8 = undefined;
41+ const bytes_read = file.readAll(&buf) catch 0;
42+ if (bytes_read > 0) {
43+ const fd_str = std.mem.trim(u8, buf[0..bytes_read], &std.ascii.whitespace);
44+ client_fd = std.fmt.parseInt(i64, fd_str, 10) catch null;
45+ }
46+ } else |_| {}
47+
48+ break; // Found one, use it
49+ }
50+
51+ if (session_name == null) {
52+ std.debug.print("Error: Not currently attached to any session\n", .{});
53+ std.debug.print("Use Ctrl-b d to detach from within an attached session\n", .{});
54+ std.process.exit(1);
55+ }
56+ defer if (session_name) |name| allocator.free(name);
57+
58+ const unix_addr = try std.net.Address.initUnix(socket_path);
59+ const socket_fd = try posix.socket(posix.AF.UNIX, posix.SOCK.STREAM, 0);
60+ defer posix.close(socket_fd);
61+
62+ try posix.connect(socket_fd, &unix_addr.any, unix_addr.getOsSockLen());
63+
64+ const request = if (client_fd) |fd|
65+ try std.fmt.allocPrint(
66+ allocator,
67+ "{{\"type\":\"detach_session_request\",\"payload\":{{\"session_name\":\"{s}\",\"client_fd\":{d}}}}}\n",
68+ .{ session_name.?, fd },
69+ )
70+ else
71+ try std.fmt.allocPrint(
72+ allocator,
73+ "{{\"type\":\"detach_session_request\",\"payload\":{{\"session_name\":\"{s}\"}}}}\n",
74+ .{session_name.?},
75+ );
76+ defer allocator.free(request);
77+
78+ _ = try posix.write(socket_fd, request);
79+
80+ var buffer: [4096]u8 = undefined;
81+ const bytes_read = try posix.read(socket_fd, &buffer);
82+
83+ if (bytes_read == 0) {
84+ std.debug.print("No response from daemon\n", .{});
85+ return;
86+ }
87+
88+ const response = buffer[0..bytes_read];
89+ const newline_idx = std.mem.indexOf(u8, response, "\n") orelse bytes_read;
90+ const msg_line = response[0..newline_idx];
91+
92+ const parsed = try std.json.parseFromSlice(
93+ std.json.Value,
94+ allocator,
95+ msg_line,
96+ .{},
97+ );
98+ defer parsed.deinit();
99+
100+ const root = parsed.value.object;
101+ const payload = root.get("payload").?.object;
102+ const status = payload.get("status").?.string;
103+
104+ if (std.mem.eql(u8, status, "ok")) {
105+ std.debug.print("Detached from session: {s}\n", .{session_name.?});
106+ } else {
107+ const error_msg = payload.get("error_message").?.string;
108+ std.debug.print("Failed to detach: {s}\n", .{error_msg});
109+ std.process.exit(1);
110+ }
111+}
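For context, the lookup above assumes the attach side drops a `~/.zmx_client_fd_<session>` file containing the daemon-assigned client fd. A sketch of what writing that marker might look like (the real attach code lives elsewhere in this commit; `writeClientFdMarker` is an illustrative name):

```zig
const std = @import("std");
const posix = std.posix;

// Hypothetical counterpart to the lookup in detach: write the
// daemon-assigned client fd to ~/.zmx_client_fd_<session>.
fn writeClientFdMarker(allocator: std.mem.Allocator, session_name: []const u8, client_fd: i64) !void {
    const home_dir = posix.getenv("HOME") orelse "/tmp";
    const path = try std.fmt.allocPrint(allocator, "{s}/.zmx_client_fd_{s}", .{ home_dir, session_name });
    defer allocator.free(path);

    const file = try std.fs.cwd().createFile(path, .{});
    defer file.close();

    const contents = try std.fmt.allocPrint(allocator, "{d}\n", .{client_fd});
    defer allocator.free(contents);
    _ = try posix.write(file.handle, contents);
}
```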
+67,
-0
1@@ -0,0 +1,67 @@
2+const std = @import("std");
3+const posix = std.posix;
4+
5+const socket_path = "/tmp/zmx.sock";
6+
7+pub fn main() !void {
8+ var gpa = std.heap.GeneralPurposeAllocator(.{}){};
9+ defer _ = gpa.deinit();
10+ const allocator = gpa.allocator();
11+
12+ const args = try std.process.argsAlloc(allocator);
13+ defer std.process.argsFree(allocator, args);
14+
15+ if (args.len < 3) {
16+ std.debug.print("Usage: zmx kill <session-name>\n", .{});
17+ std.process.exit(1);
18+ }
19+
20+ const session_name = args[2];
21+
22+ const unix_addr = try std.net.Address.initUnix(socket_path);
23+ const socket_fd = try posix.socket(posix.AF.UNIX, posix.SOCK.STREAM, 0);
24+ defer posix.close(socket_fd);
25+
26+ try posix.connect(socket_fd, &unix_addr.any, unix_addr.getOsSockLen());
27+
28+ const request = try std.fmt.allocPrint(
29+ allocator,
30+ "{{\"type\":\"kill_session_request\",\"payload\":{{\"session_name\":\"{s}\"}}}}\n",
31+ .{session_name},
32+ );
33+ defer allocator.free(request);
34+
35+ _ = try posix.write(socket_fd, request);
36+
37+ var buffer: [4096]u8 = undefined;
38+ const bytes_read = try posix.read(socket_fd, &buffer);
39+
40+ if (bytes_read == 0) {
41+ std.debug.print("No response from daemon\n", .{});
42+ return;
43+ }
44+
45+ const response = buffer[0..bytes_read];
46+ const newline_idx = std.mem.indexOf(u8, response, "\n") orelse bytes_read;
47+ const msg_line = response[0..newline_idx];
48+
49+ const parsed = try std.json.parseFromSlice(
50+ std.json.Value,
51+ allocator,
52+ msg_line,
53+ .{},
54+ );
55+ defer parsed.deinit();
56+
57+ const root = parsed.value.object;
58+ const payload = root.get("payload").?.object;
59+ const status = payload.get("status").?.string;
60+
61+ if (std.mem.eql(u8, status, "ok")) {
62+ std.debug.print("Killed session: {s}\n", .{session_name});
63+ } else {
64+ const error_msg = payload.get("error_message").?.string;
65+ std.debug.print("Failed to kill session: {s}\n", .{error_msg});
66+ std.process.exit(1);
67+ }
68+}
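One protocol note on the clients in this commit: the single `posix.read` above assumes the whole response line arrives in one read, but a SOCK_STREAM socket may deliver it in pieces. A more robust client would read until the first newline; a sketch (`readLine` is a hypothetical helper, not in this commit):

```zig
const std = @import("std");
const posix = std.posix;

// Hypothetical helper: keep reading until a full newline-terminated
// response line has arrived, or the peer closes the connection.
fn readLine(fd: posix.fd_t, buf: []u8) ![]u8 {
    var len: usize = 0;
    while (len < buf.len) {
        const n = try posix.read(fd, buf[len..]);
        if (n == 0) break; // daemon closed the connection
        len += n;
        if (std.mem.indexOfScalar(u8, buf[0..len], '\n')) |i| return buf[0 .. i + 1];
    }
    return buf[0..len];
}
```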
+70,
-0
1@@ -0,0 +1,70 @@
2+const std = @import("std");
3+const posix = std.posix;
4+
5+const socket_path = "/tmp/zmx.sock";
6+
7+pub fn main() !void {
8+ var gpa = std.heap.GeneralPurposeAllocator(.{}){};
9+ defer _ = gpa.deinit();
10+ const allocator = gpa.allocator();
11+
12+ const unix_addr = try std.net.Address.initUnix(socket_path);
13+ const socket_fd = try posix.socket(posix.AF.UNIX, posix.SOCK.STREAM, 0);
14+ defer posix.close(socket_fd);
15+
16+ try posix.connect(socket_fd, &unix_addr.any, unix_addr.getOsSockLen());
17+
18+ const request = "{\"type\":\"list_sessions_request\",\"payload\":{}}\n";
19+ _ = try posix.write(socket_fd, request);
20+
21+ var buffer: [8192]u8 = undefined;
22+ const bytes_read = try posix.read(socket_fd, &buffer);
23+
24+ if (bytes_read == 0) {
25+ std.debug.print("No response from daemon\n", .{});
26+ return;
27+ }
28+
29+ const response = buffer[0..bytes_read];
30+ const newline_idx = std.mem.indexOf(u8, response, "\n") orelse bytes_read;
31+ const msg_line = response[0..newline_idx];
32+
33+ const parsed = try std.json.parseFromSlice(
34+ std.json.Value,
35+ allocator,
36+ msg_line,
37+ .{},
38+ );
39+ defer parsed.deinit();
40+
41+ const root = parsed.value.object;
42+ const payload = root.get("payload").?.object;
43+ const status = payload.get("status").?.string;
44+
45+ if (!std.mem.eql(u8, status, "ok")) {
46+ const error_msg = payload.get("error_message").?.string;
47+ std.debug.print("Error: {s}\n", .{error_msg});
48+ return;
49+ }
50+
51+ const sessions = payload.get("sessions").?.array;
52+
53+ if (sessions.items.len == 0) {
54+ std.debug.print("No active sessions\n", .{});
55+ return;
56+ }
57+
58+ std.debug.print("Active sessions:\n", .{});
59+ std.debug.print("{s:<20} {s:<12} {s:<8} {s}\n", .{ "NAME", "STATUS", "CLIENTS", "CREATED" });
60+ std.debug.print("{s}\n", .{"-" ** 60});
61+
62+ for (sessions.items) |session_value| {
63+ const session = session_value.object;
64+ const name = session.get("name").?.string;
65+ const session_status = session.get("status").?.string;
66+ const clients = session.get("clients").?.integer;
67+ const created_at = session.get("created_at").?.string;
68+
69+ std.debug.print("{s:<20} {s:<12} {d:<8} {s}\n", .{ name, session_status, clients, created_at });
70+ }
71+}
+34,
-0
1@@ -0,0 +1,34 @@
2+const std = @import("std");
3+const cli = @import("cli.zig");
4+const daemon = @import("daemon.zig");
5+const attach = @import("attach.zig");
6+const detach = @import("detach.zig");
7+const kill = @import("kill.zig");
8+const list = @import("list.zig");
9+const clap = @import("clap");
10+
11+pub fn main() !void {
12+ var gpa = std.heap.GeneralPurposeAllocator(.{}){};
13+ defer _ = gpa.deinit();
14+ const allocator = gpa.allocator();
15+
16+ var iter = try std.process.ArgIterator.initWithAllocator(allocator);
17+ defer iter.deinit();
18+
19+ var res = try cli.parse(allocator, &iter);
20+ defer res.deinit();
21+
22+ const command = res.positionals[0] orelse return cli.help();
23+ switch (command) {
24+ .help => try cli.help(),
25+ .daemon => try daemon.main(),
26+ .list => try list.main(),
27+ .attach => try attach.main(),
28+ .detach => try detach.main(),
29+ .kill => try kill.main(),
30+ }
31+}
32+
33+test "simple test" {
34+ try std.testing.expectEqual(42, 42);
35+}