Inline All The Things (#8946)

I used the new CPU counter mode in Instruments.app to track down
functions that had instruction delivery bottlenecks (indicating i-cache
misses) and picked a bunch of trivial functions to mark as inline (plus
a couple that are only used once or twice and which benefit from
inlining).

The size of `macos-arm64/libghostty-fat.a` built with `zig build
-Doptimize=ReleaseFast -Dxcframework-target=native` goes from
`145,538,856` bytes on `main` to `145,595,952` on this branch, a
negligible increase.

These changes resulted in some pretty sizable improvements in vtebench
results on my machine (Apple M3 Max):
<img width="983" height="696" alt="image"
src="https://github.com/user-attachments/assets/cac595ca-7616-48ed-983c-208c2ca2023f"
/>

With this, the only vtebench test we're slower than Alacritty in (on my
machine, at 130x51 window size) is `dense_cells` (which, IMO, is so
artificial that optimizing for it might actually negatively impact real
world performance).

I also made a fairly simple improvement to how we copy the screen in the
renderer, giving it its own page pool for less memory churn. Further
optimization in that area should be explored, since in some scenarios it
seems like as much as 35% of the time on the `io-reader` thread is spent
waiting for the lock.

> [!NOTE]
> Before this is merged, someone really ought to test this on an x86
> processor to see how the performance compares there, since this *is*
> tuning for my processor specifically, and I know that M chips have a
> pretty big i-cache compared to some x86 processors, which could impact
> the performance characteristics of these changes.
pull/8974/head
Mitchell Hashimoto 2025-09-30 08:13:39 -07:00 committed by GitHub
commit 150fb18ca1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 176 additions and 151 deletions

View File

@ -23,7 +23,7 @@ pub fn DoublyLinkedList(comptime T: type) type {
/// Arguments:
/// node: Pointer to a node in the list.
/// new_node: Pointer to the new node to insert.
pub fn insertAfter(list: *Self, node: *Node, new_node: *Node) void {
pub inline fn insertAfter(list: *Self, node: *Node, new_node: *Node) void {
new_node.prev = node;
if (node.next) |next_node| {
// Intermediate node.
@ -42,7 +42,7 @@ pub fn DoublyLinkedList(comptime T: type) type {
/// Arguments:
/// node: Pointer to a node in the list.
/// new_node: Pointer to the new node to insert.
pub fn insertBefore(list: *Self, node: *Node, new_node: *Node) void {
pub inline fn insertBefore(list: *Self, node: *Node, new_node: *Node) void {
new_node.next = node;
if (node.prev) |prev_node| {
// Intermediate node.
@ -60,7 +60,7 @@ pub fn DoublyLinkedList(comptime T: type) type {
///
/// Arguments:
/// new_node: Pointer to the new node to insert.
pub fn append(list: *Self, new_node: *Node) void {
pub inline fn append(list: *Self, new_node: *Node) void {
if (list.last) |last| {
// Insert after last.
list.insertAfter(last, new_node);
@ -74,7 +74,7 @@ pub fn DoublyLinkedList(comptime T: type) type {
///
/// Arguments:
/// new_node: Pointer to the new node to insert.
pub fn prepend(list: *Self, new_node: *Node) void {
pub inline fn prepend(list: *Self, new_node: *Node) void {
if (list.first) |first| {
// Insert before first.
list.insertBefore(first, new_node);
@ -91,7 +91,7 @@ pub fn DoublyLinkedList(comptime T: type) type {
///
/// Arguments:
/// node: Pointer to the node to be removed.
pub fn remove(list: *Self, node: *Node) void {
pub inline fn remove(list: *Self, node: *Node) void {
if (node.prev) |prev_node| {
// Intermediate node.
prev_node.next = node.next;
@ -113,7 +113,7 @@ pub fn DoublyLinkedList(comptime T: type) type {
///
/// Returns:
/// A pointer to the last node in the list.
pub fn pop(list: *Self) ?*Node {
pub inline fn pop(list: *Self) ?*Node {
const last = list.last orelse return null;
list.remove(last);
return last;
@ -123,7 +123,7 @@ pub fn DoublyLinkedList(comptime T: type) type {
///
/// Returns:
/// A pointer to the first node in the list.
pub fn popFirst(list: *Self) ?*Node {
pub inline fn popFirst(list: *Self) ?*Node {
const first = list.first orelse return null;
list.remove(first);
return first;

View File

@ -95,6 +95,9 @@ pub fn Renderer(comptime GraphicsAPI: type) type {
/// Allocator that can be used
alloc: std.mem.Allocator,
/// MemoryPool for PageList pages which we use when cloning the screen.
page_pool: terminal.PageList.MemoryPool,
/// This mutex must be held whenever any state used in `drawFrame` is
/// being modified, and also when it's being accessed in `drawFrame`.
draw_mutex: std.Thread.Mutex = .{},
@ -676,8 +679,19 @@ pub fn Renderer(comptime GraphicsAPI: type) type {
};
errdefer if (display_link) |v| v.release();
// We preheat the page pool with 4 pages- this is an arbitrary
// choice based on what seems reasonable for the number of pages
// used by the viewport area.
var page_pool: terminal.PageList.MemoryPool = try .init(
alloc,
std.heap.page_allocator,
4,
);
errdefer page_pool.deinit();
var result: Self = .{
.alloc = alloc,
.page_pool = page_pool,
.config = options.config,
.surface_mailbox = options.surface_mailbox,
.grid_metrics = font_critical.metrics,
@ -760,6 +774,8 @@ pub fn Renderer(comptime GraphicsAPI: type) type {
}
pub fn deinit(self: *Self) void {
self.page_pool.deinit();
self.swap_chain.deinit();
if (DisplayLink != void) {
@ -1092,6 +1108,13 @@ pub fn Renderer(comptime GraphicsAPI: type) type {
full_rebuild: bool,
};
// Empty our page pool, but retain capacity.
self.page_pool.reset(.retain_capacity);
var arena: std.heap.ArenaAllocator = .init(self.alloc);
defer arena.deinit();
const alloc = arena.allocator();
// Update all our data as tightly as possible within the mutex.
var critical: Critical = critical: {
// const start = try std.time.Instant.now();
@ -1148,12 +1171,12 @@ pub fn Renderer(comptime GraphicsAPI: type) type {
// We used to share terminal state, but we've since learned through
// analysis that it is faster to copy the terminal state than to
// hold the lock while rebuilding GPU cells.
var screen_copy = try state.terminal.screen.clone(
self.alloc,
const screen_copy = try state.terminal.screen.clonePool(
alloc,
&self.page_pool,
.{ .viewport = .{} },
null,
);
errdefer screen_copy.deinit();
// Whether to draw our cursor or not.
const cursor_style = if (state.terminal.flags.password_input)
@ -1169,9 +1192,8 @@ pub fn Renderer(comptime GraphicsAPI: type) type {
const preedit: ?renderer.State.Preedit = preedit: {
if (cursor_style == null) break :preedit null;
const p = state.preedit orelse break :preedit null;
break :preedit try p.clone(self.alloc);
break :preedit try p.clone(alloc);
};
errdefer if (preedit) |p| p.deinit(self.alloc);
// If we have Kitty graphics data, we enter a SLOW SLOW SLOW path.
// We only do this if the Kitty image state is dirty meaning only if
@ -1241,10 +1263,6 @@ pub fn Renderer(comptime GraphicsAPI: type) type {
.full_rebuild = full_rebuild,
};
};
defer {
critical.screen.deinit();
if (critical.preedit) |p| p.deinit(self.alloc);
}
// Build our GPU cells
try self.rebuildCells(

View File

@ -1861,7 +1861,7 @@ pub fn maxSize(self: *const PageList) usize {
}
/// Returns true if we need to grow into our active area.
fn growRequiredForActive(self: *const PageList) bool {
inline fn growRequiredForActive(self: *const PageList) bool {
var rows: usize = 0;
var page = self.pages.last;
while (page) |p| : (page = p.prev) {
@ -2047,7 +2047,7 @@ pub fn adjustCapacity(
/// Create a new page node. This does not add it to the list and this
/// does not do any memory size accounting with max_size/page_size.
fn createPage(
inline fn createPage(
self: *PageList,
cap: Capacity,
) Allocator.Error!*List.Node {
@ -2055,7 +2055,7 @@ fn createPage(
return try createPageExt(&self.pool, cap, &self.page_size);
}
fn createPageExt(
inline fn createPageExt(
pool: *MemoryPool,
cap: Capacity,
total_size: ?*usize,
@ -3394,7 +3394,7 @@ pub const Pin = struct {
y: size.CellCountInt = 0,
x: size.CellCountInt = 0,
pub fn rowAndCell(self: Pin) struct {
pub inline fn rowAndCell(self: Pin) struct {
row: *pagepkg.Row,
cell: *pagepkg.Cell,
} {
@ -3407,7 +3407,7 @@ pub const Pin = struct {
/// Returns the cells for the row that this pin is on. The subset determines
/// what subset of the cells are returned. The "left/right" subsets are
/// inclusive of the x coordinate of the pin.
pub fn cells(self: Pin, subset: CellSubset) []pagepkg.Cell {
pub inline fn cells(self: Pin, subset: CellSubset) []pagepkg.Cell {
const rac = self.rowAndCell();
const all = self.node.data.getCells(rac.row);
return switch (subset) {
@ -3419,12 +3419,12 @@ pub const Pin = struct {
/// Returns the grapheme codepoints for the given cell. These are only
/// the EXTRA codepoints and not the first codepoint.
pub fn grapheme(self: Pin, cell: *const pagepkg.Cell) ?[]u21 {
pub inline fn grapheme(self: Pin, cell: *const pagepkg.Cell) ?[]u21 {
return self.node.data.lookupGrapheme(cell);
}
/// Returns the style for the given cell in this pin.
pub fn style(self: Pin, cell: *const pagepkg.Cell) stylepkg.Style {
pub inline fn style(self: Pin, cell: *const pagepkg.Cell) stylepkg.Style {
if (cell.style_id == stylepkg.default_id) return .{};
return self.node.data.styles.get(
self.node.data.memory,
@ -3433,12 +3433,12 @@ pub const Pin = struct {
}
/// Check if this pin is dirty.
pub fn isDirty(self: Pin) bool {
pub inline fn isDirty(self: Pin) bool {
return self.node.data.isRowDirty(self.y);
}
/// Mark this pin location as dirty.
pub fn markDirty(self: Pin) void {
pub inline fn markDirty(self: Pin) void {
var set = self.node.data.dirtyBitSet();
set.set(self.y);
}
@ -3507,7 +3507,7 @@ pub const Pin = struct {
/// pointFromPin and building up the iterator from points.
///
/// The limit pin is inclusive.
pub fn pageIterator(
pub inline fn pageIterator(
self: Pin,
direction: Direction,
limit: ?Pin,
@ -3529,7 +3529,7 @@ pub const Pin = struct {
};
}
pub fn rowIterator(
pub inline fn rowIterator(
self: Pin,
direction: Direction,
limit: ?Pin,
@ -3546,7 +3546,7 @@ pub const Pin = struct {
};
}
pub fn cellIterator(
pub inline fn cellIterator(
self: Pin,
direction: Direction,
limit: ?Pin,
@ -3647,14 +3647,14 @@ pub const Pin = struct {
return false;
}
pub fn eql(self: Pin, other: Pin) bool {
pub inline fn eql(self: Pin, other: Pin) bool {
return self.node == other.node and
self.y == other.y and
self.x == other.x;
}
/// Move the pin left n columns. n must fit within the size.
pub fn left(self: Pin, n: usize) Pin {
pub inline fn left(self: Pin, n: usize) Pin {
assert(n <= self.x);
var result = self;
result.x -= std.math.cast(size.CellCountInt, n) orelse result.x;
@ -3662,7 +3662,7 @@ pub const Pin = struct {
}
/// Move the pin right n columns. n must fit within the size.
pub fn right(self: Pin, n: usize) Pin {
pub inline fn right(self: Pin, n: usize) Pin {
assert(self.x + n < self.node.data.size.cols);
var result = self;
result.x +|= std.math.cast(size.CellCountInt, n) orelse
@ -3671,14 +3671,14 @@ pub const Pin = struct {
}
/// Move the pin left n columns, stopping at the start of the row.
pub fn leftClamp(self: Pin, n: size.CellCountInt) Pin {
pub inline fn leftClamp(self: Pin, n: size.CellCountInt) Pin {
var result = self;
result.x -|= n;
return result;
}
/// Move the pin right n columns, stopping at the end of the row.
pub fn rightClamp(self: Pin, n: size.CellCountInt) Pin {
pub inline fn rightClamp(self: Pin, n: size.CellCountInt) Pin {
var result = self;
result.x = @min(self.x +| n, self.node.data.size.cols - 1);
return result;
@ -3740,7 +3740,7 @@ pub const Pin = struct {
/// Move the pin down a certain number of rows, or return null if
/// the pin goes beyond the end of the screen.
pub fn down(self: Pin, n: usize) ?Pin {
pub inline fn down(self: Pin, n: usize) ?Pin {
return switch (self.downOverflow(n)) {
.offset => |v| v,
.overflow => null,
@ -3749,7 +3749,7 @@ pub const Pin = struct {
/// Move the pin up a certain number of rows, or return null if
/// the pin goes beyond the start of the screen.
pub fn up(self: Pin, n: usize) ?Pin {
pub inline fn up(self: Pin, n: usize) ?Pin {
return switch (self.upOverflow(n)) {
.offset => |v| v,
.overflow => null,

View File

@ -314,7 +314,7 @@ pub fn next(self: *Parser, c: u8) [3]?Action {
};
}
pub fn collect(self: *Parser, c: u8) void {
pub inline fn collect(self: *Parser, c: u8) void {
if (self.intermediates_idx >= MAX_INTERMEDIATE) {
log.warn("invalid intermediates count", .{});
return;
@ -324,7 +324,7 @@ pub fn collect(self: *Parser, c: u8) void {
self.intermediates_idx += 1;
}
fn doAction(self: *Parser, action: TransitionAction, c: u8) ?Action {
inline fn doAction(self: *Parser, action: TransitionAction, c: u8) ?Action {
return switch (action) {
.none, .ignore => null,
.print => Action{ .print = c },
@ -410,7 +410,7 @@ fn doAction(self: *Parser, action: TransitionAction, c: u8) ?Action {
};
}
pub fn clear(self: *Parser) void {
pub inline fn clear(self: *Parser) void {
self.intermediates_idx = 0;
self.params_idx = 0;
self.params_sep = .initEmpty();

View File

@ -533,13 +533,13 @@ pub fn adjustCapacity(
return new_node;
}
pub fn cursorCellRight(self: *Screen, n: size.CellCountInt) *pagepkg.Cell {
pub inline fn cursorCellRight(self: *Screen, n: size.CellCountInt) *pagepkg.Cell {
assert(self.cursor.x + n < self.pages.cols);
const cell: [*]pagepkg.Cell = @ptrCast(self.cursor.page_cell);
return @ptrCast(cell + n);
}
pub fn cursorCellLeft(self: *Screen, n: size.CellCountInt) *pagepkg.Cell {
pub inline fn cursorCellLeft(self: *Screen, n: size.CellCountInt) *pagepkg.Cell {
assert(self.cursor.x >= n);
const cell: [*]pagepkg.Cell = @ptrCast(self.cursor.page_cell);
return @ptrCast(cell - n);
@ -959,7 +959,7 @@ fn cursorScrollAboveRotate(self: *Screen) !void {
/// Move the cursor down if we're not at the bottom of the screen. Otherwise
/// scroll. Currently only used for testing.
fn cursorDownOrScroll(self: *Screen) !void {
inline fn cursorDownOrScroll(self: *Screen) !void {
if (self.cursor.y + 1 < self.pages.rows) {
self.cursorDown(1);
} else {
@ -1034,7 +1034,7 @@ pub fn cursorCopy(self: *Screen, other: Cursor, opts: struct {
/// page than the old AND we have a style or hyperlink set. In that case,
/// we must release our old one and insert the new one, since styles are
/// stored per-page.
fn cursorChangePin(self: *Screen, new: Pin) void {
inline fn cursorChangePin(self: *Screen, new: Pin) void {
// Moving the cursor affects text run splitting (ligatures) so
// we must mark the old and new page dirty. We do this as long
// as the pins are not equal
@ -1108,7 +1108,7 @@ fn cursorChangePin(self: *Screen, new: Pin) void {
/// Mark the cursor position as dirty.
/// TODO: test
pub fn cursorMarkDirty(self: *Screen) void {
pub inline fn cursorMarkDirty(self: *Screen) void {
self.cursor.page_pin.markDirty();
}
@ -1160,7 +1160,7 @@ pub const Scroll = union(enum) {
};
/// Scroll the viewport of the terminal grid.
pub fn scroll(self: *Screen, behavior: Scroll) void {
pub inline fn scroll(self: *Screen, behavior: Scroll) void {
defer self.assertIntegrity();
if (comptime build_options.kitty_graphics) {
@ -1181,7 +1181,7 @@ pub fn scroll(self: *Screen, behavior: Scroll) void {
/// See PageList.scrollClear. In addition to that, we reset the cursor
/// to be on top.
pub fn scrollClear(self: *Screen) !void {
pub inline fn scrollClear(self: *Screen) !void {
defer self.assertIntegrity();
try self.pages.scrollClear();
@ -1196,14 +1196,14 @@ pub fn scrollClear(self: *Screen) !void {
}
/// Returns true if the viewport is scrolled to the bottom of the screen.
pub fn viewportIsBottom(self: Screen) bool {
pub inline fn viewportIsBottom(self: Screen) bool {
return self.pages.viewport == .active;
}
/// Erase the region specified by tl and br, inclusive. This will physically
/// erase the rows meaning the memory will be reclaimed (if the underlying
/// page is empty) and other rows will be shifted up.
pub fn eraseRows(
pub inline fn eraseRows(
self: *Screen,
tl: point.Point,
bl: ?point.Point,
@ -1539,7 +1539,7 @@ pub fn splitCellBoundary(
/// Returns the blank cell to use when doing terminal operations that
/// require preserving the bg color.
pub fn blankCell(self: *const Screen) Cell {
pub inline fn blankCell(self: *const Screen) Cell {
if (self.cursor.style_id == style.default_id) return .{};
return self.cursor.style.bgCell() orelse .{};
}
@ -1557,7 +1557,7 @@ pub fn blankCell(self: *const Screen) Cell {
/// probably means the system is in trouble anyways. I'd like to improve this
/// in the future but it is not a priority particularly because this scenario
/// (resize) is difficult.
pub fn resize(
pub inline fn resize(
self: *Screen,
cols: size.CellCountInt,
rows: size.CellCountInt,
@ -1568,7 +1568,7 @@ pub fn resize(
/// Resize the screen without any reflow. In this mode, columns/rows will
/// be truncated as they are shrunk. If they are grown, the new space is filled
/// with zeros.
pub fn resizeWithoutReflow(
pub inline fn resizeWithoutReflow(
self: *Screen,
cols: size.CellCountInt,
rows: size.CellCountInt,

View File

@ -191,7 +191,7 @@ pub const Page = struct {
/// The backing memory is always allocated using mmap directly.
/// You cannot use custom allocators with this structure because
/// it is critical to performance that we use mmap.
pub fn init(cap: Capacity) !Page {
pub inline fn init(cap: Capacity) !Page {
const l = layout(cap);
// We use mmap directly to avoid Zig allocator overhead
@ -215,7 +215,7 @@ pub const Page = struct {
/// Initialize a new page using the given backing memory.
/// It is up to the caller to not call deinit on these pages.
pub fn initBuf(buf: OffsetBuf, l: Layout) Page {
pub inline fn initBuf(buf: OffsetBuf, l: Layout) Page {
const cap = l.capacity;
const rows = buf.member(Row, l.rows_start);
const cells = buf.member(Cell, l.cells_start);
@ -270,13 +270,13 @@ pub const Page = struct {
/// Deinitialize the page, freeing any backing memory. Do NOT call
/// this if you allocated the backing memory yourself (i.e. you used
/// initBuf).
pub fn deinit(self: *Page) void {
pub inline fn deinit(self: *Page) void {
posix.munmap(self.memory);
self.* = undefined;
}
/// Reinitialize the page with the same capacity.
pub fn reinit(self: *Page) void {
pub inline fn reinit(self: *Page) void {
// We zero the page memory as u64 instead of u8 because
// we can and it's empirically quite a bit faster.
@memset(@as([*]u64, @ptrCast(self.memory))[0 .. self.memory.len / 8], 0);
@ -306,7 +306,7 @@ pub const Page = struct {
/// Temporarily pause integrity checks. This is useful when you are
/// doing a lot of operations that would trigger integrity check
/// violations but you know the page will end up in a consistent state.
pub fn pauseIntegrityChecks(self: *Page, v: bool) void {
pub inline fn pauseIntegrityChecks(self: *Page, v: bool) void {
if (build_options.slow_runtime_safety) {
if (v) {
self.pause_integrity_checks += 1;
@ -319,7 +319,7 @@ pub const Page = struct {
/// A helper that can be used to assert the integrity of the page
/// when runtime safety is enabled. This is a no-op when runtime
/// safety is disabled. This uses the libc allocator.
pub fn assertIntegrity(self: *const Page) void {
pub inline fn assertIntegrity(self: *const Page) void {
if (comptime build_options.slow_runtime_safety) {
var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
defer _ = debug_allocator.deinit();
@ -603,7 +603,7 @@ pub const Page = struct {
/// Clone the contents of this page. This will allocate new memory
/// using the page allocator. If you want to manage memory manually,
/// use cloneBuf.
pub fn clone(self: *const Page) !Page {
pub inline fn clone(self: *const Page) !Page {
const backing = try posix.mmap(
null,
self.memory.len,
@ -619,7 +619,7 @@ pub const Page = struct {
/// Clone the entire contents of this page.
///
/// The buffer must be at least the size of self.memory.
pub fn cloneBuf(self: *const Page, buf: []align(std.heap.page_size_min) u8) Page {
pub inline fn cloneBuf(self: *const Page, buf: []align(std.heap.page_size_min) u8) Page {
assert(buf.len >= self.memory.len);
// The entire concept behind a page is that everything is stored
@ -671,7 +671,7 @@ pub const Page = struct {
/// If the other page has more columns, the extra columns will be
/// truncated. If the other page has fewer columns, the extra columns
/// will be zeroed.
pub fn cloneFrom(
pub inline fn cloneFrom(
self: *Page,
other: *const Page,
y_start: usize,
@ -695,7 +695,7 @@ pub const Page = struct {
}
/// Clone a single row from another page into this page.
pub fn cloneRowFrom(
pub inline fn cloneRowFrom(
self: *Page,
other: *const Page,
dst_row: *Row,
@ -912,13 +912,13 @@ pub const Page = struct {
}
/// Get a single row. y must be valid.
pub fn getRow(self: *const Page, y: usize) *Row {
pub inline fn getRow(self: *const Page, y: usize) *Row {
assert(y < self.size.rows);
return &self.rows.ptr(self.memory)[y];
}
/// Get the cells for a row.
pub fn getCells(self: *const Page, row: *Row) []Cell {
pub inline fn getCells(self: *const Page, row: *Row) []Cell {
if (build_options.slow_runtime_safety) {
const rows = self.rows.ptr(self.memory);
const cells = self.cells.ptr(self.memory);
@ -931,7 +931,7 @@ pub const Page = struct {
}
/// Get the row and cell for the given X/Y within this page.
pub fn getRowAndCell(self: *const Page, x: usize, y: usize) struct {
pub inline fn getRowAndCell(self: *const Page, x: usize, y: usize) struct {
row: *Row,
cell: *Cell,
} {
@ -1016,7 +1016,7 @@ pub const Page = struct {
}
/// Swap two cells within the same row as quickly as possible.
pub fn swapCells(
pub inline fn swapCells(
self: *Page,
src: *Cell,
dst: *Cell,
@ -1077,7 +1077,7 @@ pub const Page = struct {
/// active, Page cannot know this and it will still be ref counted down.
/// The best solution for this is to artificially increment the ref count
/// prior to calling this function.
pub fn clearCells(
pub inline fn clearCells(
self: *Page,
row: *Row,
left: usize,
@ -1127,14 +1127,14 @@ pub const Page = struct {
}
/// Returns the hyperlink ID for the given cell.
pub fn lookupHyperlink(self: *const Page, cell: *const Cell) ?hyperlink.Id {
pub inline fn lookupHyperlink(self: *const Page, cell: *const Cell) ?hyperlink.Id {
const cell_offset = getOffset(Cell, self.memory, cell);
const map = self.hyperlink_map.map(self.memory);
return map.get(cell_offset);
}
/// Clear the hyperlink from the given cell.
pub fn clearHyperlink(self: *Page, row: *Row, cell: *Cell) void {
pub inline fn clearHyperlink(self: *Page, row: *Row, cell: *Cell) void {
defer self.assertIntegrity();
// Get our ID
@ -1258,7 +1258,7 @@ pub const Page = struct {
/// Caller is responsible for updating the refcount in the hyperlink
/// set as necessary by calling `use` if the id was not acquired with
/// `add`.
pub fn setHyperlink(self: *Page, row: *Row, cell: *Cell, id: hyperlink.Id) error{HyperlinkMapOutOfMemory}!void {
pub inline fn setHyperlink(self: *Page, row: *Row, cell: *Cell, id: hyperlink.Id) error{HyperlinkMapOutOfMemory}!void {
defer self.assertIntegrity();
const cell_offset = getOffset(Cell, self.memory, cell);
@ -1300,7 +1300,7 @@ pub const Page = struct {
/// Move the hyperlink from one cell to another. This can't fail
/// because we avoid any allocations since we're just moving data.
/// Destination must NOT have a hyperlink.
fn moveHyperlink(self: *Page, src: *Cell, dst: *Cell) void {
inline fn moveHyperlink(self: *Page, src: *Cell, dst: *Cell) void {
assert(src.hyperlink);
assert(!dst.hyperlink);
@ -1320,19 +1320,19 @@ pub const Page = struct {
/// Returns the number of hyperlinks in the page. This isn't the byte
/// size but the total number of unique cells that have hyperlink data.
pub fn hyperlinkCount(self: *const Page) usize {
pub inline fn hyperlinkCount(self: *const Page) usize {
return self.hyperlink_map.map(self.memory).count();
}
/// Returns the hyperlink capacity for the page. This isn't the byte
/// size but the number of unique cells that can have hyperlink data.
pub fn hyperlinkCapacity(self: *const Page) usize {
pub inline fn hyperlinkCapacity(self: *const Page) usize {
return self.hyperlink_map.map(self.memory).capacity();
}
/// Set the graphemes for the given cell. This asserts that the cell
/// has no graphemes set, and only contains a single codepoint.
pub fn setGraphemes(
pub inline fn setGraphemes(
self: *Page,
row: *Row,
cell: *Cell,
@ -1433,7 +1433,7 @@ pub const Page = struct {
/// Returns the codepoints for the given cell. These are the codepoints
/// in addition to the first codepoint. The first codepoint is NOT
/// included since it is on the cell itself.
pub fn lookupGrapheme(self: *const Page, cell: *const Cell) ?[]u21 {
pub inline fn lookupGrapheme(self: *const Page, cell: *const Cell) ?[]u21 {
const cell_offset = getOffset(Cell, self.memory, cell);
const map = self.grapheme_map.map(self.memory);
const slice = map.get(cell_offset) orelse return null;
@ -1446,7 +1446,7 @@ pub const Page = struct {
/// WARNING: This will NOT change the content_tag on the cells because
/// there are scenarios where we want to move graphemes without changing
/// the content tag. Callers beware but assertIntegrity should catch this.
fn moveGrapheme(self: *Page, src: *Cell, dst: *Cell) void {
inline fn moveGrapheme(self: *Page, src: *Cell, dst: *Cell) void {
if (build_options.slow_runtime_safety) {
assert(src.hasGrapheme());
assert(!dst.hasGrapheme());
@ -1462,7 +1462,7 @@ pub const Page = struct {
}
/// Clear the graphemes for a given cell.
pub fn clearGrapheme(self: *Page, row: *Row, cell: *Cell) void {
pub inline fn clearGrapheme(self: *Page, row: *Row, cell: *Cell) void {
defer self.assertIntegrity();
if (build_options.slow_runtime_safety) assert(cell.hasGrapheme());
@ -1488,13 +1488,13 @@ pub const Page = struct {
/// Returns the number of graphemes in the page. This isn't the byte
/// size but the total number of unique cells that have grapheme data.
pub fn graphemeCount(self: *const Page) usize {
pub inline fn graphemeCount(self: *const Page) usize {
return self.grapheme_map.map(self.memory).count();
}
/// Returns the grapheme capacity for the page. This isn't the byte
/// size but the number of unique cells that can have grapheme data.
pub fn graphemeCapacity(self: *const Page) usize {
pub inline fn graphemeCapacity(self: *const Page) usize {
return self.grapheme_map.map(self.memory).capacity();
}
@ -1676,7 +1676,7 @@ pub const Page = struct {
/// The returned value is a DynamicBitSetUnmanaged but it is NOT
/// actually dynamic; do NOT call resize on this. It is safe to
/// read and write but do not resize it.
pub fn dirtyBitSet(self: *const Page) std.DynamicBitSetUnmanaged {
pub inline fn dirtyBitSet(self: *const Page) std.DynamicBitSetUnmanaged {
return .{
.bit_length = self.capacity.rows,
.masks = self.dirty.ptr(self.memory),
@ -1686,14 +1686,14 @@ pub const Page = struct {
/// Returns true if the given row is dirty. This is NOT very
/// efficient if you're checking many rows and you should use
/// dirtyBitSet directly instead.
pub fn isRowDirty(self: *const Page, y: usize) bool {
pub inline fn isRowDirty(self: *const Page, y: usize) bool {
return self.dirtyBitSet().isSet(y);
}
/// Returns true if this page is dirty at all. If you plan on
/// checking any additional rows, you should use dirtyBitSet and
/// check this on your own so you have the set available.
pub fn isDirty(self: *const Page) bool {
pub inline fn isDirty(self: *const Page) bool {
return self.dirtyBitSet().findFirstSet() != null;
}
@ -1722,7 +1722,7 @@ pub const Page = struct {
/// The memory layout for a page given a desired minimum cols
/// and rows size.
pub fn layout(cap: Capacity) Layout {
pub inline fn layout(cap: Capacity) Layout {
const rows_count: usize = @intCast(cap.rows);
const rows_start = 0;
const rows_end: usize = rows_start + (rows_count * @sizeOf(Row));

View File

@ -56,7 +56,7 @@ pub const Point = union(Tag) {
screen: Coordinate,
history: Coordinate,
pub fn coord(self: Point) Coordinate {
pub inline fn coord(self: Point) Coordinate {
return switch (self) {
.active,
.viewport,

View File

@ -31,7 +31,7 @@ pub fn Offset(comptime T: type) type {
};
/// Returns a pointer to the start of the data, properly typed.
pub fn ptr(self: Self, base: anytype) [*]T {
pub inline fn ptr(self: Self, base: anytype) [*]T {
// The offset must be properly aligned for the type since
// our return type is naturally aligned. We COULD modify this
// to return arbitrary alignment, but its not something we need.

View File

@ -64,7 +64,7 @@ pub fn Stream(comptime Handler: type) type {
}
/// Process a string of characters.
pub fn nextSlice(self: *Self, input: []const u8) !void {
pub inline fn nextSlice(self: *Self, input: []const u8) !void {
// Disable SIMD optimizations if build requests it or if our
// manual debug mode is on.
if (comptime debug or !build_options.simd) {
@ -87,7 +87,7 @@ pub fn Stream(comptime Handler: type) type {
}
}
fn nextSliceCapped(self: *Self, input: []const u8, cp_buf: []u32) !void {
inline fn nextSliceCapped(self: *Self, input: []const u8, cp_buf: []u32) !void {
assert(input.len <= cp_buf.len);
var offset: usize = 0;
@ -144,7 +144,7 @@ pub fn Stream(comptime Handler: type) type {
///
/// Expects input to start with 0x1B, use consumeUntilGround first
/// if the stream may be in the middle of an escape sequence.
fn consumeAllEscapes(self: *Self, input: []const u8) !usize {
inline fn consumeAllEscapes(self: *Self, input: []const u8) !usize {
var offset: usize = 0;
while (input[offset] == 0x1B) {
self.parser.state = .escape;
@ -158,7 +158,7 @@ pub fn Stream(comptime Handler: type) type {
/// Parses escape sequences until the parser reaches the ground state.
/// Returns the number of bytes consumed from the provided input.
fn consumeUntilGround(self: *Self, input: []const u8) !usize {
inline fn consumeUntilGround(self: *Self, input: []const u8) !usize {
var offset: usize = 0;
while (self.parser.state != .ground) {
if (offset >= input.len) return input.len;
@ -171,7 +171,7 @@ pub fn Stream(comptime Handler: type) type {
/// Like nextSlice but takes one byte and is necessarily a scalar
/// operation that can't use SIMD. Prefer nextSlice if you can and
/// try to get multiple bytes at once.
pub fn next(self: *Self, c: u8) !void {
pub inline fn next(self: *Self, c: u8) !void {
// The scalar path can be responsible for decoding UTF-8.
if (self.parser.state == .ground) {
try self.nextUtf8(c);
@ -185,7 +185,7 @@ pub fn Stream(comptime Handler: type) type {
///
/// This assumes we're in the UTF-8 decoding state. If we may not
/// be in the UTF-8 decoding state call nextSlice or next.
fn nextUtf8(self: *Self, c: u8) !void {
inline fn nextUtf8(self: *Self, c: u8) !void {
assert(self.parser.state == .ground);
const res = self.utf8decoder.next(c);
@ -278,7 +278,14 @@ pub fn Stream(comptime Handler: type) type {
return;
}
const actions = self.parser.next(c);
// We explicitly inline this call here for performance reasons.
//
// We do this rather than mark Parser.next as inline because doing
// that causes weird behavior in some tests- I'm not sure if they
// miscompile or it's just very counter-intuitive comptime stuff,
// but regardless, this is the easy solution.
const actions = @call(.always_inline, Parser.next, .{ &self.parser, c });
for (actions) |action_opt| {
const action = action_opt orelse continue;
if (comptime debug) log.info("action: {}", .{action});
@ -326,13 +333,13 @@ pub fn Stream(comptime Handler: type) type {
}
}
pub fn print(self: *Self, c: u21) !void {
pub inline fn print(self: *Self, c: u21) !void {
if (@hasDecl(T, "print")) {
try self.handler.print(c);
}
}
pub fn execute(self: *Self, c: u8) !void {
pub inline fn execute(self: *Self, c: u8) !void {
const c0: ansi.C0 = @enumFromInt(c);
if (comptime debug) log.info("execute: {}", .{c0});
switch (c0) {
@ -383,7 +390,7 @@ pub fn Stream(comptime Handler: type) type {
}
}
fn csiDispatch(self: *Self, input: Parser.Action.CSI) !void {
inline fn csiDispatch(self: *Self, input: Parser.Action.CSI) !void {
switch (input.final) {
// CUU - Cursor Up
'A', 'k' => switch (input.intermediates.len) {
@ -1490,7 +1497,7 @@ pub fn Stream(comptime Handler: type) type {
}
}
fn oscDispatch(self: *Self, cmd: osc.Command) !void {
inline fn oscDispatch(self: *Self, cmd: osc.Command) !void {
switch (cmd) {
.change_window_title => |title| {
if (@hasDecl(T, "changeWindowTitle")) {
@ -1635,7 +1642,7 @@ pub fn Stream(comptime Handler: type) type {
}
}
fn configureCharset(
inline fn configureCharset(
self: *Self,
intermediates: []const u8,
set: charsets.Charset,
@ -1669,7 +1676,7 @@ pub fn Stream(comptime Handler: type) type {
});
}
fn escDispatch(
inline fn escDispatch(
self: *Self,
action: Parser.Action.ESC,
) !void {

View File

@ -186,19 +186,19 @@ pub const StreamHandler = struct {
_ = self.renderer_mailbox.push(msg, .{ .forever = {} });
}
pub fn dcsHook(self: *StreamHandler, dcs: terminal.DCS) !void {
pub inline fn dcsHook(self: *StreamHandler, dcs: terminal.DCS) !void {
var cmd = self.dcs.hook(self.alloc, dcs) orelse return;
defer cmd.deinit();
try self.dcsCommand(&cmd);
}
pub fn dcsPut(self: *StreamHandler, byte: u8) !void {
pub inline fn dcsPut(self: *StreamHandler, byte: u8) !void {
var cmd = self.dcs.put(byte) orelse return;
defer cmd.deinit();
try self.dcsCommand(&cmd);
}
pub fn dcsUnhook(self: *StreamHandler) !void {
pub inline fn dcsUnhook(self: *StreamHandler) !void {
var cmd = self.dcs.unhook() orelse return;
defer cmd.deinit();
try self.dcsCommand(&cmd);
@ -293,11 +293,11 @@ pub const StreamHandler = struct {
}
}
pub fn apcStart(self: *StreamHandler) !void {
pub inline fn apcStart(self: *StreamHandler) !void {
self.apc.start();
}
pub fn apcPut(self: *StreamHandler, byte: u8) !void {
pub inline fn apcPut(self: *StreamHandler, byte: u8) !void {
self.apc.feed(self.alloc, byte);
}
@ -322,23 +322,23 @@ pub const StreamHandler = struct {
}
}
pub fn print(self: *StreamHandler, ch: u21) !void {
pub inline fn print(self: *StreamHandler, ch: u21) !void {
try self.terminal.print(ch);
}
pub fn printRepeat(self: *StreamHandler, count: usize) !void {
pub inline fn printRepeat(self: *StreamHandler, count: usize) !void {
try self.terminal.printRepeat(count);
}
pub fn bell(self: *StreamHandler) !void {
pub inline fn bell(self: *StreamHandler) !void {
self.surfaceMessageWriter(.ring_bell);
}
pub fn backspace(self: *StreamHandler) !void {
pub inline fn backspace(self: *StreamHandler) !void {
self.terminal.backspace();
}
pub fn horizontalTab(self: *StreamHandler, count: u16) !void {
pub inline fn horizontalTab(self: *StreamHandler, count: u16) !void {
for (0..count) |_| {
const x = self.terminal.screen.cursor.x;
try self.terminal.horizontalTab();
@ -346,7 +346,7 @@ pub const StreamHandler = struct {
}
}
pub fn horizontalTabBack(self: *StreamHandler, count: u16) !void {
pub inline fn horizontalTabBack(self: *StreamHandler, count: u16) !void {
for (0..count) |_| {
const x = self.terminal.screen.cursor.x;
try self.terminal.horizontalTabBack();
@ -354,61 +354,61 @@ pub const StreamHandler = struct {
}
}
pub fn linefeed(self: *StreamHandler) !void {
pub inline fn linefeed(self: *StreamHandler) !void {
// Small optimization: call index instead of linefeed because they're
// identical and this avoids one layer of function call overhead.
try self.terminal.index();
}
pub fn carriageReturn(self: *StreamHandler) !void {
pub inline fn carriageReturn(self: *StreamHandler) !void {
self.terminal.carriageReturn();
}
pub fn setCursorLeft(self: *StreamHandler, amount: u16) !void {
pub inline fn setCursorLeft(self: *StreamHandler, amount: u16) !void {
self.terminal.cursorLeft(amount);
}
pub fn setCursorRight(self: *StreamHandler, amount: u16) !void {
pub inline fn setCursorRight(self: *StreamHandler, amount: u16) !void {
self.terminal.cursorRight(amount);
}
pub fn setCursorDown(self: *StreamHandler, amount: u16, carriage: bool) !void {
pub inline fn setCursorDown(self: *StreamHandler, amount: u16, carriage: bool) !void {
self.terminal.cursorDown(amount);
if (carriage) self.terminal.carriageReturn();
}
pub fn setCursorUp(self: *StreamHandler, amount: u16, carriage: bool) !void {
pub inline fn setCursorUp(self: *StreamHandler, amount: u16, carriage: bool) !void {
self.terminal.cursorUp(amount);
if (carriage) self.terminal.carriageReturn();
}
pub fn setCursorCol(self: *StreamHandler, col: u16) !void {
pub inline fn setCursorCol(self: *StreamHandler, col: u16) !void {
self.terminal.setCursorPos(self.terminal.screen.cursor.y + 1, col);
}
pub fn setCursorColRelative(self: *StreamHandler, offset: u16) !void {
pub inline fn setCursorColRelative(self: *StreamHandler, offset: u16) !void {
self.terminal.setCursorPos(
self.terminal.screen.cursor.y + 1,
self.terminal.screen.cursor.x + 1 +| offset,
);
}
pub fn setCursorRow(self: *StreamHandler, row: u16) !void {
pub inline fn setCursorRow(self: *StreamHandler, row: u16) !void {
self.terminal.setCursorPos(row, self.terminal.screen.cursor.x + 1);
}
pub fn setCursorRowRelative(self: *StreamHandler, offset: u16) !void {
pub inline fn setCursorRowRelative(self: *StreamHandler, offset: u16) !void {
self.terminal.setCursorPos(
self.terminal.screen.cursor.y + 1 +| offset,
self.terminal.screen.cursor.x + 1,
);
}
pub fn setCursorPos(self: *StreamHandler, row: u16, col: u16) !void {
pub inline fn setCursorPos(self: *StreamHandler, row: u16, col: u16) !void {
self.terminal.setCursorPos(row, col);
}
pub fn eraseDisplay(self: *StreamHandler, mode: terminal.EraseDisplay, protected: bool) !void {
pub inline fn eraseDisplay(self: *StreamHandler, mode: terminal.EraseDisplay, protected: bool) !void {
if (mode == .complete) {
// Whenever we erase the full display, scroll to bottom.
try self.terminal.scrollViewport(.{ .bottom = {} });
@ -418,48 +418,48 @@ pub const StreamHandler = struct {
self.terminal.eraseDisplay(mode, protected);
}
pub fn eraseLine(self: *StreamHandler, mode: terminal.EraseLine, protected: bool) !void {
pub inline fn eraseLine(self: *StreamHandler, mode: terminal.EraseLine, protected: bool) !void {
self.terminal.eraseLine(mode, protected);
}
pub fn deleteChars(self: *StreamHandler, count: usize) !void {
pub inline fn deleteChars(self: *StreamHandler, count: usize) !void {
self.terminal.deleteChars(count);
}
pub fn eraseChars(self: *StreamHandler, count: usize) !void {
pub inline fn eraseChars(self: *StreamHandler, count: usize) !void {
self.terminal.eraseChars(count);
}
pub fn insertLines(self: *StreamHandler, count: usize) !void {
pub inline fn insertLines(self: *StreamHandler, count: usize) !void {
self.terminal.insertLines(count);
}
pub fn insertBlanks(self: *StreamHandler, count: usize) !void {
pub inline fn insertBlanks(self: *StreamHandler, count: usize) !void {
self.terminal.insertBlanks(count);
}
pub fn deleteLines(self: *StreamHandler, count: usize) !void {
pub inline fn deleteLines(self: *StreamHandler, count: usize) !void {
self.terminal.deleteLines(count);
}
pub fn reverseIndex(self: *StreamHandler) !void {
pub inline fn reverseIndex(self: *StreamHandler) !void {
self.terminal.reverseIndex();
}
pub fn index(self: *StreamHandler) !void {
pub inline fn index(self: *StreamHandler) !void {
try self.terminal.index();
}
pub fn nextLine(self: *StreamHandler) !void {
pub inline fn nextLine(self: *StreamHandler) !void {
try self.terminal.index();
self.terminal.carriageReturn();
}
pub fn setTopAndBottomMargin(self: *StreamHandler, top: u16, bot: u16) !void {
pub inline fn setTopAndBottomMargin(self: *StreamHandler, top: u16, bot: u16) !void {
self.terminal.setTopAndBottomMargin(top, bot);
}
pub fn setLeftAndRightMarginAmbiguous(self: *StreamHandler) !void {
pub inline fn setLeftAndRightMarginAmbiguous(self: *StreamHandler) !void {
if (self.terminal.modes.get(.enable_left_and_right_margin)) {
try self.setLeftAndRightMargin(0, 0);
} else {
@ -467,7 +467,7 @@ pub const StreamHandler = struct {
}
}
pub fn setLeftAndRightMargin(self: *StreamHandler, left: u16, right: u16) !void {
pub inline fn setLeftAndRightMargin(self: *StreamHandler, left: u16, right: u16) !void {
self.terminal.setLeftAndRightMargin(left, right);
}
@ -504,12 +504,12 @@ pub const StreamHandler = struct {
self.messageWriter(msg);
}
pub fn saveMode(self: *StreamHandler, mode: terminal.Mode) !void {
pub inline fn saveMode(self: *StreamHandler, mode: terminal.Mode) !void {
// log.debug("save mode={}", .{mode});
self.terminal.modes.save(mode);
}
pub fn restoreMode(self: *StreamHandler, mode: terminal.Mode) !void {
pub inline fn restoreMode(self: *StreamHandler, mode: terminal.Mode) !void {
// For restore mode we have to restore but if we set it, we
// always have to call setMode because setting some modes have
// side effects and we want to make sure we process those.
@ -696,11 +696,11 @@ pub const StreamHandler = struct {
}
}
pub fn setMouseShiftCapture(self: *StreamHandler, v: bool) !void {
pub inline fn setMouseShiftCapture(self: *StreamHandler, v: bool) !void {
self.terminal.flags.mouse_shift_capture = if (v) .true else .false;
}
pub fn setAttribute(self: *StreamHandler, attr: terminal.Attribute) !void {
pub inline fn setAttribute(self: *StreamHandler, attr: terminal.Attribute) !void {
switch (attr) {
.unknown => |unk| log.warn("unimplemented or unknown SGR attribute: {any}", .{unk}),
@ -709,11 +709,11 @@ pub const StreamHandler = struct {
}
}
pub fn startHyperlink(self: *StreamHandler, uri: []const u8, id: ?[]const u8) !void {
pub inline fn startHyperlink(self: *StreamHandler, uri: []const u8, id: ?[]const u8) !void {
try self.terminal.screen.startHyperlink(uri, id);
}
pub fn endHyperlink(self: *StreamHandler) !void {
pub inline fn endHyperlink(self: *StreamHandler) !void {
self.terminal.screen.endHyperlink();
}
@ -832,31 +832,31 @@ pub const StreamHandler = struct {
}
}
pub fn setProtectedMode(self: *StreamHandler, mode: terminal.ProtectedMode) !void {
pub inline fn setProtectedMode(self: *StreamHandler, mode: terminal.ProtectedMode) !void {
self.terminal.setProtectedMode(mode);
}
pub fn decaln(self: *StreamHandler) !void {
pub inline fn decaln(self: *StreamHandler) !void {
try self.terminal.decaln();
}
pub fn tabClear(self: *StreamHandler, cmd: terminal.TabClear) !void {
pub inline fn tabClear(self: *StreamHandler, cmd: terminal.TabClear) !void {
self.terminal.tabClear(cmd);
}
pub fn tabSet(self: *StreamHandler) !void {
pub inline fn tabSet(self: *StreamHandler) !void {
self.terminal.tabSet();
}
pub fn tabReset(self: *StreamHandler) !void {
pub inline fn tabReset(self: *StreamHandler) !void {
self.terminal.tabReset();
}
pub fn saveCursor(self: *StreamHandler) !void {
pub inline fn saveCursor(self: *StreamHandler) !void {
self.terminal.saveCursor();
}
pub fn restoreCursor(self: *StreamHandler) !void {
pub inline fn restoreCursor(self: *StreamHandler) !void {
try self.terminal.restoreCursor();
}
@ -865,11 +865,11 @@ pub const StreamHandler = struct {
self.messageWriter(try termio.Message.writeReq(self.alloc, self.enquiry_response));
}
pub fn scrollDown(self: *StreamHandler, count: usize) !void {
pub inline fn scrollDown(self: *StreamHandler, count: usize) !void {
self.terminal.scrollDown(count);
}
pub fn scrollUp(self: *StreamHandler, count: usize) !void {
pub inline fn scrollUp(self: *StreamHandler, count: usize) !void {
self.terminal.scrollUp(count);
}
@ -995,7 +995,7 @@ pub const StreamHandler = struct {
self.surfaceMessageWriter(.{ .set_title = buf });
}
pub fn setMouseShape(
pub inline fn setMouseShape(
self: *StreamHandler,
shape: terminal.MouseShape,
) !void {
@ -1037,22 +1037,22 @@ pub const StreamHandler = struct {
});
}
pub fn promptStart(self: *StreamHandler, aid: ?[]const u8, redraw: bool) !void {
pub inline fn promptStart(self: *StreamHandler, aid: ?[]const u8, redraw: bool) !void {
_ = aid;
self.terminal.markSemanticPrompt(.prompt);
self.terminal.flags.shell_redraws_prompt = redraw;
}
pub fn promptContinuation(self: *StreamHandler, aid: ?[]const u8) !void {
pub inline fn promptContinuation(self: *StreamHandler, aid: ?[]const u8) !void {
_ = aid;
self.terminal.markSemanticPrompt(.prompt_continuation);
}
pub fn promptEnd(self: *StreamHandler) !void {
pub inline fn promptEnd(self: *StreamHandler) !void {
self.terminal.markSemanticPrompt(.input);
}
pub fn endOfInput(self: *StreamHandler) !void {
pub inline fn endOfInput(self: *StreamHandler) !void {
self.terminal.markSemanticPrompt(.command);
}