terminal: inline all the things

A whole bunch of `inline` annotations. Some of these were tracked down
with Instruments.app; others are guesses that just seemed right because
they are trivial wrapper functions.

Regardless, these changes are ultimately supported by improved vtebench
results on my machine (Apple M3 Max).
Qwerasd 2025-09-28 21:26:12 -06:00 committed by Mitchell Hashimoto
parent 43dd712053
commit 0388a2b396
7 changed files with 80 additions and 80 deletions
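
For context on what the annotations do: Zig's `inline` keyword on a function makes inlining semantically guaranteed rather than a compiler heuristic, so every call site gets the body expanded in place and a trivial wrapper costs no more than writing its body directly. A minimal sketch of the pattern — this `Pin` is a stripped-down stand-in for illustration, not ghostty's actual type:

```zig
const std = @import("std");

const Pin = struct {
    x: u16,

    // A trivial wrapper in the style of this commit: `inline` forces the
    // body to be expanded at every call site instead of emitting a call.
    inline fn leftClamp(self: Pin, n: u16) Pin {
        return .{ .x = self.x -| n }; // saturating subtract, clamps at 0
    }
};

pub fn main() void {
    const p: Pin = .{ .x = 3 };
    // Compiles to the same code as writing `p.x -| 5` directly.
    std.debug.print("{}\n", .{p.leftClamp(5).x}); // prints 0
}
```

The trade-off is potential binary size and compile-time cost, which is why Zig's docs advise measuring rather than inlining blanket-style — hence the profiling and benchmarking mentioned above.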

src/terminal/PageList.zig

@@ -1861,7 +1861,7 @@ pub fn maxSize(self: *const PageList) usize {
 }

 /// Returns true if we need to grow into our active area.
-fn growRequiredForActive(self: *const PageList) bool {
+inline fn growRequiredForActive(self: *const PageList) bool {
     var rows: usize = 0;
     var page = self.pages.last;
     while (page) |p| : (page = p.prev) {
@@ -2047,7 +2047,7 @@ pub fn adjustCapacity(

 /// Create a new page node. This does not add it to the list and this
 /// does not do any memory size accounting with max_size/page_size.
-fn createPage(
+inline fn createPage(
     self: *PageList,
     cap: Capacity,
 ) Allocator.Error!*List.Node {
@@ -2055,7 +2055,7 @@ fn createPage(
     return try createPageExt(&self.pool, cap, &self.page_size);
 }

-fn createPageExt(
+inline fn createPageExt(
     pool: *MemoryPool,
     cap: Capacity,
     total_size: ?*usize,
@@ -3394,7 +3394,7 @@ pub const Pin = struct {
     y: size.CellCountInt = 0,
     x: size.CellCountInt = 0,

-    pub fn rowAndCell(self: Pin) struct {
+    pub inline fn rowAndCell(self: Pin) struct {
         row: *pagepkg.Row,
         cell: *pagepkg.Cell,
     } {
@@ -3407,7 +3407,7 @@ pub const Pin = struct {
     /// Returns the cells for the row that this pin is on. The subset determines
     /// what subset of the cells are returned. The "left/right" subsets are
     /// inclusive of the x coordinate of the pin.
-    pub fn cells(self: Pin, subset: CellSubset) []pagepkg.Cell {
+    pub inline fn cells(self: Pin, subset: CellSubset) []pagepkg.Cell {
         const rac = self.rowAndCell();
         const all = self.node.data.getCells(rac.row);
         return switch (subset) {
@@ -3419,12 +3419,12 @@ pub const Pin = struct {

     /// Returns the grapheme codepoints for the given cell. These are only
     /// the EXTRA codepoints and not the first codepoint.
-    pub fn grapheme(self: Pin, cell: *const pagepkg.Cell) ?[]u21 {
+    pub inline fn grapheme(self: Pin, cell: *const pagepkg.Cell) ?[]u21 {
         return self.node.data.lookupGrapheme(cell);
     }

     /// Returns the style for the given cell in this pin.
-    pub fn style(self: Pin, cell: *const pagepkg.Cell) stylepkg.Style {
+    pub inline fn style(self: Pin, cell: *const pagepkg.Cell) stylepkg.Style {
         if (cell.style_id == stylepkg.default_id) return .{};
         return self.node.data.styles.get(
             self.node.data.memory,
@@ -3433,12 +3433,12 @@ pub const Pin = struct {
     }

     /// Check if this pin is dirty.
-    pub fn isDirty(self: Pin) bool {
+    pub inline fn isDirty(self: Pin) bool {
         return self.node.data.isRowDirty(self.y);
     }

     /// Mark this pin location as dirty.
-    pub fn markDirty(self: Pin) void {
+    pub inline fn markDirty(self: Pin) void {
         var set = self.node.data.dirtyBitSet();
         set.set(self.y);
     }
@@ -3507,7 +3507,7 @@ pub const Pin = struct {
     /// pointFromPin and building up the iterator from points.
     ///
     /// The limit pin is inclusive.
-    pub fn pageIterator(
+    pub inline fn pageIterator(
         self: Pin,
         direction: Direction,
         limit: ?Pin,
@@ -3529,7 +3529,7 @@ pub const Pin = struct {
         };
     }

-    pub fn rowIterator(
+    pub inline fn rowIterator(
         self: Pin,
         direction: Direction,
         limit: ?Pin,
@@ -3546,7 +3546,7 @@ pub const Pin = struct {
         };
     }

-    pub fn cellIterator(
+    pub inline fn cellIterator(
         self: Pin,
         direction: Direction,
         limit: ?Pin,
@@ -3647,14 +3647,14 @@ pub const Pin = struct {
         return false;
     }

-    pub fn eql(self: Pin, other: Pin) bool {
+    pub inline fn eql(self: Pin, other: Pin) bool {
         return self.node == other.node and
             self.y == other.y and
             self.x == other.x;
     }

     /// Move the pin left n columns. n must fit within the size.
-    pub fn left(self: Pin, n: usize) Pin {
+    pub inline fn left(self: Pin, n: usize) Pin {
         assert(n <= self.x);
         var result = self;
         result.x -= std.math.cast(size.CellCountInt, n) orelse result.x;
@@ -3662,7 +3662,7 @@ pub const Pin = struct {
     }

     /// Move the pin right n columns. n must fit within the size.
-    pub fn right(self: Pin, n: usize) Pin {
+    pub inline fn right(self: Pin, n: usize) Pin {
         assert(self.x + n < self.node.data.size.cols);
         var result = self;
         result.x +|= std.math.cast(size.CellCountInt, n) orelse
@@ -3671,14 +3671,14 @@ pub const Pin = struct {
     }

     /// Move the pin left n columns, stopping at the start of the row.
-    pub fn leftClamp(self: Pin, n: size.CellCountInt) Pin {
+    pub inline fn leftClamp(self: Pin, n: size.CellCountInt) Pin {
         var result = self;
         result.x -|= n;
         return result;
     }

     /// Move the pin right n columns, stopping at the end of the row.
-    pub fn rightClamp(self: Pin, n: size.CellCountInt) Pin {
+    pub inline fn rightClamp(self: Pin, n: size.CellCountInt) Pin {
         var result = self;
         result.x = @min(self.x +| n, self.node.data.size.cols - 1);
         return result;
@@ -3740,7 +3740,7 @@ pub const Pin = struct {

     /// Move the pin down a certain number of rows, or return null if
     /// the pin goes beyond the end of the screen.
-    pub fn down(self: Pin, n: usize) ?Pin {
+    pub inline fn down(self: Pin, n: usize) ?Pin {
         return switch (self.downOverflow(n)) {
             .offset => |v| v,
             .overflow => null,
@@ -3749,7 +3749,7 @@ pub const Pin = struct {

     /// Move the pin up a certain number of rows, or return null if
     /// the pin goes beyond the start of the screen.
-    pub fn up(self: Pin, n: usize) ?Pin {
+    pub inline fn up(self: Pin, n: usize) ?Pin {
         return switch (self.upOverflow(n)) {
             .offset => |v| v,
             .overflow => null,

src/terminal/Parser.zig

@@ -254,7 +254,7 @@ pub fn deinit(self: *Parser) void {
 /// Next consumes the next character c and returns the actions to execute.
 /// Up to 3 actions may need to be executed -- in order -- representing
 /// the state exit, transition, and entry actions.
-pub fn next(self: *Parser, c: u8) [3]?Action {
+pub inline fn next(self: *Parser, c: u8) [3]?Action {
     const effect = table[c][@intFromEnum(self.state)];

     // log.info("next: {x}", .{c});
@@ -314,7 +314,7 @@ pub fn next(self: *Parser, c: u8) [3]?Action {
     };
 }

-pub fn collect(self: *Parser, c: u8) void {
+pub inline fn collect(self: *Parser, c: u8) void {
     if (self.intermediates_idx >= MAX_INTERMEDIATE) {
         log.warn("invalid intermediates count", .{});
         return;
@@ -324,7 +324,7 @@ pub fn collect(self: *Parser, c: u8) void {
     self.intermediates_idx += 1;
 }

-fn doAction(self: *Parser, action: TransitionAction, c: u8) ?Action {
+inline fn doAction(self: *Parser, action: TransitionAction, c: u8) ?Action {
     return switch (action) {
         .none, .ignore => null,
         .print => Action{ .print = c },
@@ -410,7 +410,7 @@ fn doAction(self: *Parser, action: TransitionAction, c: u8) ?Action {
     };
 }

-pub fn clear(self: *Parser) void {
+pub inline fn clear(self: *Parser) void {
     self.intermediates_idx = 0;
     self.params_idx = 0;
     self.params_sep = .initEmpty();

src/terminal/Screen.zig

@@ -533,13 +533,13 @@ pub fn adjustCapacity(
     return new_node;
 }

-pub fn cursorCellRight(self: *Screen, n: size.CellCountInt) *pagepkg.Cell {
+pub inline fn cursorCellRight(self: *Screen, n: size.CellCountInt) *pagepkg.Cell {
     assert(self.cursor.x + n < self.pages.cols);
     const cell: [*]pagepkg.Cell = @ptrCast(self.cursor.page_cell);
     return @ptrCast(cell + n);
 }

-pub fn cursorCellLeft(self: *Screen, n: size.CellCountInt) *pagepkg.Cell {
+pub inline fn cursorCellLeft(self: *Screen, n: size.CellCountInt) *pagepkg.Cell {
     assert(self.cursor.x >= n);
     const cell: [*]pagepkg.Cell = @ptrCast(self.cursor.page_cell);
     return @ptrCast(cell - n);
@@ -959,7 +959,7 @@ fn cursorScrollAboveRotate(self: *Screen) !void {

 /// Move the cursor down if we're not at the bottom of the screen. Otherwise
 /// scroll. Currently only used for testing.
-fn cursorDownOrScroll(self: *Screen) !void {
+inline fn cursorDownOrScroll(self: *Screen) !void {
     if (self.cursor.y + 1 < self.pages.rows) {
         self.cursorDown(1);
     } else {
@@ -1034,7 +1034,7 @@ pub fn cursorCopy(self: *Screen, other: Cursor, opts: struct {
 /// page than the old AND we have a style or hyperlink set. In that case,
 /// we must release our old one and insert the new one, since styles are
 /// stored per-page.
-fn cursorChangePin(self: *Screen, new: Pin) void {
+inline fn cursorChangePin(self: *Screen, new: Pin) void {
     // Moving the cursor affects text run splitting (ligatures) so
     // we must mark the old and new page dirty. We do this as long
     // as the pins are not equal
@@ -1108,7 +1108,7 @@ fn cursorChangePin(self: *Screen, new: Pin) void {

 /// Mark the cursor position as dirty.
 /// TODO: test
-pub fn cursorMarkDirty(self: *Screen) void {
+pub inline fn cursorMarkDirty(self: *Screen) void {
     self.cursor.page_pin.markDirty();
 }

@@ -1160,7 +1160,7 @@ pub const Scroll = union(enum) {
 };

 /// Scroll the viewport of the terminal grid.
-pub fn scroll(self: *Screen, behavior: Scroll) void {
+pub inline fn scroll(self: *Screen, behavior: Scroll) void {
     defer self.assertIntegrity();

     if (comptime build_options.kitty_graphics) {
@@ -1181,7 +1181,7 @@ pub fn scroll(self: *Screen, behavior: Scroll) void {

 /// See PageList.scrollClear. In addition to that, we reset the cursor
 /// to be on top.
-pub fn scrollClear(self: *Screen) !void {
+pub inline fn scrollClear(self: *Screen) !void {
     defer self.assertIntegrity();

     try self.pages.scrollClear();
@@ -1196,14 +1196,14 @@ pub fn scrollClear(self: *Screen) !void {
 }

 /// Returns true if the viewport is scrolled to the bottom of the screen.
-pub fn viewportIsBottom(self: Screen) bool {
+pub inline fn viewportIsBottom(self: Screen) bool {
     return self.pages.viewport == .active;
 }

 /// Erase the region specified by tl and br, inclusive. This will physically
 /// erase the rows meaning the memory will be reclaimed (if the underlying
 /// page is empty) and other rows will be shifted up.
-pub fn eraseRows(
+pub inline fn eraseRows(
     self: *Screen,
     tl: point.Point,
     bl: ?point.Point,
@@ -1539,7 +1539,7 @@ pub fn splitCellBoundary(

 /// Returns the blank cell to use when doing terminal operations that
 /// require preserving the bg color.
-pub fn blankCell(self: *const Screen) Cell {
+pub inline fn blankCell(self: *const Screen) Cell {
     if (self.cursor.style_id == style.default_id) return .{};
     return self.cursor.style.bgCell() orelse .{};
 }
@@ -1557,7 +1557,7 @@ pub fn blankCell(self: *const Screen) Cell {
 /// probably means the system is in trouble anyways. I'd like to improve this
 /// in the future but it is not a priority particularly because this scenario
 /// (resize) is difficult.
-pub fn resize(
+pub inline fn resize(
     self: *Screen,
     cols: size.CellCountInt,
     rows: size.CellCountInt,
@@ -1568,7 +1568,7 @@ pub fn resize(
 /// Resize the screen without any reflow. In this mode, columns/rows will
 /// be truncated as they are shrunk. If they are grown, the new space is filled
 /// with zeros.
-pub fn resizeWithoutReflow(
+pub inline fn resizeWithoutReflow(
     self: *Screen,
     cols: size.CellCountInt,
     rows: size.CellCountInt,

src/terminal/page.zig

@@ -191,7 +191,7 @@ pub const Page = struct {
     /// The backing memory is always allocated using mmap directly.
     /// You cannot use custom allocators with this structure because
     /// it is critical to performance that we use mmap.
-    pub fn init(cap: Capacity) !Page {
+    pub inline fn init(cap: Capacity) !Page {
         const l = layout(cap);

         // We use mmap directly to avoid Zig allocator overhead
@@ -215,7 +215,7 @@ pub const Page = struct {

     /// Initialize a new page using the given backing memory.
     /// It is up to the caller to not call deinit on these pages.
-    pub fn initBuf(buf: OffsetBuf, l: Layout) Page {
+    pub inline fn initBuf(buf: OffsetBuf, l: Layout) Page {
         const cap = l.capacity;
         const rows = buf.member(Row, l.rows_start);
         const cells = buf.member(Cell, l.cells_start);
@@ -270,13 +270,13 @@ pub const Page = struct {
     /// Deinitialize the page, freeing any backing memory. Do NOT call
     /// this if you allocated the backing memory yourself (i.e. you used
     /// initBuf).
-    pub fn deinit(self: *Page) void {
+    pub inline fn deinit(self: *Page) void {
         posix.munmap(self.memory);
         self.* = undefined;
     }

     /// Reinitialize the page with the same capacity.
-    pub fn reinit(self: *Page) void {
+    pub inline fn reinit(self: *Page) void {
         // We zero the page memory as u64 instead of u8 because
         // we can and it's empirically quite a bit faster.
         @memset(@as([*]u64, @ptrCast(self.memory))[0 .. self.memory.len / 8], 0);
@@ -306,7 +306,7 @@ pub const Page = struct {
     /// Temporarily pause integrity checks. This is useful when you are
     /// doing a lot of operations that would trigger integrity check
     /// violations but you know the page will end up in a consistent state.
-    pub fn pauseIntegrityChecks(self: *Page, v: bool) void {
+    pub inline fn pauseIntegrityChecks(self: *Page, v: bool) void {
         if (build_options.slow_runtime_safety) {
             if (v) {
                 self.pause_integrity_checks += 1;
@@ -319,7 +319,7 @@ pub const Page = struct {
     /// A helper that can be used to assert the integrity of the page
     /// when runtime safety is enabled. This is a no-op when runtime
     /// safety is disabled. This uses the libc allocator.
-    pub fn assertIntegrity(self: *const Page) void {
+    pub inline fn assertIntegrity(self: *const Page) void {
         if (comptime build_options.slow_runtime_safety) {
             var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
             defer _ = debug_allocator.deinit();
@@ -603,7 +603,7 @@ pub const Page = struct {
     /// Clone the contents of this page. This will allocate new memory
     /// using the page allocator. If you want to manage memory manually,
     /// use cloneBuf.
-    pub fn clone(self: *const Page) !Page {
+    pub inline fn clone(self: *const Page) !Page {
         const backing = try posix.mmap(
             null,
             self.memory.len,
@@ -619,7 +619,7 @@ pub const Page = struct {
     /// Clone the entire contents of this page.
     ///
     /// The buffer must be at least the size of self.memory.
-    pub fn cloneBuf(self: *const Page, buf: []align(std.heap.page_size_min) u8) Page {
+    pub inline fn cloneBuf(self: *const Page, buf: []align(std.heap.page_size_min) u8) Page {
         assert(buf.len >= self.memory.len);

         // The entire concept behind a page is that everything is stored
@@ -671,7 +671,7 @@ pub const Page = struct {
     /// If the other page has more columns, the extra columns will be
     /// truncated. If the other page has fewer columns, the extra columns
     /// will be zeroed.
-    pub fn cloneFrom(
+    pub inline fn cloneFrom(
         self: *Page,
         other: *const Page,
         y_start: usize,
@@ -695,7 +695,7 @@ pub const Page = struct {
     }

     /// Clone a single row from another page into this page.
-    pub fn cloneRowFrom(
+    pub inline fn cloneRowFrom(
         self: *Page,
         other: *const Page,
         dst_row: *Row,
@@ -912,13 +912,13 @@ pub const Page = struct {
     }

     /// Get a single row. y must be valid.
-    pub fn getRow(self: *const Page, y: usize) *Row {
+    pub inline fn getRow(self: *const Page, y: usize) *Row {
         assert(y < self.size.rows);
         return &self.rows.ptr(self.memory)[y];
     }

     /// Get the cells for a row.
-    pub fn getCells(self: *const Page, row: *Row) []Cell {
+    pub inline fn getCells(self: *const Page, row: *Row) []Cell {
         if (build_options.slow_runtime_safety) {
             const rows = self.rows.ptr(self.memory);
             const cells = self.cells.ptr(self.memory);
@@ -931,7 +931,7 @@ pub const Page = struct {
     }

     /// Get the row and cell for the given X/Y within this page.
-    pub fn getRowAndCell(self: *const Page, x: usize, y: usize) struct {
+    pub inline fn getRowAndCell(self: *const Page, x: usize, y: usize) struct {
         row: *Row,
         cell: *Cell,
     } {
@@ -1016,7 +1016,7 @@ pub const Page = struct {
     }

     /// Swap two cells within the same row as quickly as possible.
-    pub fn swapCells(
+    pub inline fn swapCells(
         self: *Page,
         src: *Cell,
         dst: *Cell,
@@ -1077,7 +1077,7 @@ pub const Page = struct {
     /// active, Page cannot know this and it will still be ref counted down.
     /// The best solution for this is to artificially increment the ref count
     /// prior to calling this function.
-    pub fn clearCells(
+    pub inline fn clearCells(
         self: *Page,
         row: *Row,
         left: usize,
@@ -1127,14 +1127,14 @@ pub const Page = struct {
     }

     /// Returns the hyperlink ID for the given cell.
-    pub fn lookupHyperlink(self: *const Page, cell: *const Cell) ?hyperlink.Id {
+    pub inline fn lookupHyperlink(self: *const Page, cell: *const Cell) ?hyperlink.Id {
         const cell_offset = getOffset(Cell, self.memory, cell);
         const map = self.hyperlink_map.map(self.memory);
         return map.get(cell_offset);
     }

     /// Clear the hyperlink from the given cell.
-    pub fn clearHyperlink(self: *Page, row: *Row, cell: *Cell) void {
+    pub inline fn clearHyperlink(self: *Page, row: *Row, cell: *Cell) void {
         defer self.assertIntegrity();

         // Get our ID
@@ -1258,7 +1258,7 @@ pub const Page = struct {
     /// Caller is responsible for updating the refcount in the hyperlink
     /// set as necessary by calling `use` if the id was not acquired with
     /// `add`.
-    pub fn setHyperlink(self: *Page, row: *Row, cell: *Cell, id: hyperlink.Id) error{HyperlinkMapOutOfMemory}!void {
+    pub inline fn setHyperlink(self: *Page, row: *Row, cell: *Cell, id: hyperlink.Id) error{HyperlinkMapOutOfMemory}!void {
         defer self.assertIntegrity();

         const cell_offset = getOffset(Cell, self.memory, cell);
@@ -1300,7 +1300,7 @@ pub const Page = struct {
     /// Move the hyperlink from one cell to another. This can't fail
     /// because we avoid any allocations since we're just moving data.
     /// Destination must NOT have a hyperlink.
-    fn moveHyperlink(self: *Page, src: *Cell, dst: *Cell) void {
+    inline fn moveHyperlink(self: *Page, src: *Cell, dst: *Cell) void {
         assert(src.hyperlink);
         assert(!dst.hyperlink);

@@ -1320,19 +1320,19 @@ pub const Page = struct {

     /// Returns the number of hyperlinks in the page. This isn't the byte
     /// size but the total number of unique cells that have hyperlink data.
-    pub fn hyperlinkCount(self: *const Page) usize {
+    pub inline fn hyperlinkCount(self: *const Page) usize {
         return self.hyperlink_map.map(self.memory).count();
     }

     /// Returns the hyperlink capacity for the page. This isn't the byte
     /// size but the number of unique cells that can have hyperlink data.
-    pub fn hyperlinkCapacity(self: *const Page) usize {
+    pub inline fn hyperlinkCapacity(self: *const Page) usize {
         return self.hyperlink_map.map(self.memory).capacity();
     }

     /// Set the graphemes for the given cell. This asserts that the cell
     /// has no graphemes set, and only contains a single codepoint.
-    pub fn setGraphemes(
+    pub inline fn setGraphemes(
         self: *Page,
         row: *Row,
         cell: *Cell,
@@ -1433,7 +1433,7 @@ pub const Page = struct {
     /// Returns the codepoints for the given cell. These are the codepoints
     /// in addition to the first codepoint. The first codepoint is NOT
     /// included since it is on the cell itself.
-    pub fn lookupGrapheme(self: *const Page, cell: *const Cell) ?[]u21 {
+    pub inline fn lookupGrapheme(self: *const Page, cell: *const Cell) ?[]u21 {
         const cell_offset = getOffset(Cell, self.memory, cell);
         const map = self.grapheme_map.map(self.memory);
         const slice = map.get(cell_offset) orelse return null;
@@ -1446,7 +1446,7 @@ pub const Page = struct {
     /// WARNING: This will NOT change the content_tag on the cells because
     /// there are scenarios where we want to move graphemes without changing
     /// the content tag. Callers beware but assertIntegrity should catch this.
-    fn moveGrapheme(self: *Page, src: *Cell, dst: *Cell) void {
+    inline fn moveGrapheme(self: *Page, src: *Cell, dst: *Cell) void {
         if (build_options.slow_runtime_safety) {
             assert(src.hasGrapheme());
             assert(!dst.hasGrapheme());
@@ -1462,7 +1462,7 @@ pub const Page = struct {
     }

     /// Clear the graphemes for a given cell.
-    pub fn clearGrapheme(self: *Page, row: *Row, cell: *Cell) void {
+    pub inline fn clearGrapheme(self: *Page, row: *Row, cell: *Cell) void {
         defer self.assertIntegrity();

         if (build_options.slow_runtime_safety) assert(cell.hasGrapheme());
@@ -1488,13 +1488,13 @@ pub const Page = struct {

     /// Returns the number of graphemes in the page. This isn't the byte
     /// size but the total number of unique cells that have grapheme data.
-    pub fn graphemeCount(self: *const Page) usize {
+    pub inline fn graphemeCount(self: *const Page) usize {
         return self.grapheme_map.map(self.memory).count();
     }

     /// Returns the grapheme capacity for the page. This isn't the byte
     /// size but the number of unique cells that can have grapheme data.
-    pub fn graphemeCapacity(self: *const Page) usize {
+    pub inline fn graphemeCapacity(self: *const Page) usize {
         return self.grapheme_map.map(self.memory).capacity();
     }

@@ -1676,7 +1676,7 @@ pub const Page = struct {
     /// The returned value is a DynamicBitSetUnmanaged but it is NOT
     /// actually dynamic; do NOT call resize on this. It is safe to
     /// read and write but do not resize it.
-    pub fn dirtyBitSet(self: *const Page) std.DynamicBitSetUnmanaged {
+    pub inline fn dirtyBitSet(self: *const Page) std.DynamicBitSetUnmanaged {
         return .{
             .bit_length = self.capacity.rows,
             .masks = self.dirty.ptr(self.memory),
@@ -1686,14 +1686,14 @@ pub const Page = struct {
     /// Returns true if the given row is dirty. This is NOT very
     /// efficient if you're checking many rows and you should use
     /// dirtyBitSet directly instead.
-    pub fn isRowDirty(self: *const Page, y: usize) bool {
+    pub inline fn isRowDirty(self: *const Page, y: usize) bool {
         return self.dirtyBitSet().isSet(y);
     }

     /// Returns true if this page is dirty at all. If you plan on
     /// checking any additional rows, you should use dirtyBitSet and
     /// check this on your own so you have the set available.
-    pub fn isDirty(self: *const Page) bool {
+    pub inline fn isDirty(self: *const Page) bool {
         return self.dirtyBitSet().findFirstSet() != null;
     }

@@ -1722,7 +1722,7 @@ pub const Page = struct {

     /// The memory layout for a page given a desired minimum cols
     /// and rows size.
-    pub fn layout(cap: Capacity) Layout {
+    pub inline fn layout(cap: Capacity) Layout {
         const rows_count: usize = @intCast(cap.rows);
         const rows_start = 0;
         const rows_end: usize = rows_start + (rows_count * @sizeOf(Row));

src/terminal/point.zig

@@ -56,7 +56,7 @@ pub const Point = union(Tag) {
     screen: Coordinate,
     history: Coordinate,

-    pub fn coord(self: Point) Coordinate {
+    pub inline fn coord(self: Point) Coordinate {
         return switch (self) {
             .active,
             .viewport,

src/terminal/size.zig

@@ -31,7 +31,7 @@ pub fn Offset(comptime T: type) type {
         };

         /// Returns a pointer to the start of the data, properly typed.
-        pub fn ptr(self: Self, base: anytype) [*]T {
+        pub inline fn ptr(self: Self, base: anytype) [*]T {
             // The offset must be properly aligned for the type since
             // our return type is naturally aligned. We COULD modify this
             // to return arbitrary alignment, but its not something we need.

src/terminal/stream.zig

@@ -64,7 +64,7 @@ pub fn Stream(comptime Handler: type) type {
         }

         /// Process a string of characters.
-        pub fn nextSlice(self: *Self, input: []const u8) !void {
+        pub inline fn nextSlice(self: *Self, input: []const u8) !void {
             // Disable SIMD optimizations if build requests it or if our
             // manual debug mode is on.
             if (comptime debug or !build_options.simd) {
@@ -87,7 +87,7 @@ pub fn Stream(comptime Handler: type) type {
             }
         }

-        fn nextSliceCapped(self: *Self, input: []const u8, cp_buf: []u32) !void {
+        inline fn nextSliceCapped(self: *Self, input: []const u8, cp_buf: []u32) !void {
             assert(input.len <= cp_buf.len);

             var offset: usize = 0;
@@ -144,7 +144,7 @@ pub fn Stream(comptime Handler: type) type {
         ///
         /// Expects input to start with 0x1B, use consumeUntilGround first
         /// if the stream may be in the middle of an escape sequence.
-        fn consumeAllEscapes(self: *Self, input: []const u8) !usize {
+        inline fn consumeAllEscapes(self: *Self, input: []const u8) !usize {
             var offset: usize = 0;
             while (input[offset] == 0x1B) {
                 self.parser.state = .escape;
@@ -158,7 +158,7 @@ pub fn Stream(comptime Handler: type) type {

         /// Parses escape sequences until the parser reaches the ground state.
         /// Returns the number of bytes consumed from the provided input.
-        fn consumeUntilGround(self: *Self, input: []const u8) !usize {
+        inline fn consumeUntilGround(self: *Self, input: []const u8) !usize {
             var offset: usize = 0;
             while (self.parser.state != .ground) {
                 if (offset >= input.len) return input.len;
@@ -171,7 +171,7 @@ pub fn Stream(comptime Handler: type) type {
         /// Like nextSlice but takes one byte and is necessarily a scalar
         /// operation that can't use SIMD. Prefer nextSlice if you can and
         /// try to get multiple bytes at once.
-        pub fn next(self: *Self, c: u8) !void {
+        pub inline fn next(self: *Self, c: u8) !void {
             // The scalar path can be responsible for decoding UTF-8.
             if (self.parser.state == .ground) {
                 try self.nextUtf8(c);
@@ -185,7 +185,7 @@ pub fn Stream(comptime Handler: type) type {
         ///
         /// This assumes we're in the UTF-8 decoding state. If we may not
        /// be in the UTF-8 decoding state call nextSlice or next.
-        fn nextUtf8(self: *Self, c: u8) !void {
+        inline fn nextUtf8(self: *Self, c: u8) !void {
            assert(self.parser.state == .ground);

            const res = self.utf8decoder.next(c);
@@ -326,13 +326,13 @@ pub fn Stream(comptime Handler: type) type {
             }
         }

-        pub fn print(self: *Self, c: u21) !void {
+        pub inline fn print(self: *Self, c: u21) !void {
             if (@hasDecl(T, "print")) {
                 try self.handler.print(c);
             }
         }

-        pub fn execute(self: *Self, c: u8) !void {
+        pub inline fn execute(self: *Self, c: u8) !void {
             const c0: ansi.C0 = @enumFromInt(c);
             if (comptime debug) log.info("execute: {}", .{c0});
             switch (c0) {
@@ -383,7 +383,7 @@ pub fn Stream(comptime Handler: type) type {
             }
         }

-        fn csiDispatch(self: *Self, input: Parser.Action.CSI) !void {
+        inline fn csiDispatch(self: *Self, input: Parser.Action.CSI) !void {
             switch (input.final) {
                 // CUU - Cursor Up
                 'A', 'k' => switch (input.intermediates.len) {
@@ -1490,7 +1490,7 @@ pub fn Stream(comptime Handler: type) type {
             }
         }

-        fn oscDispatch(self: *Self, cmd: osc.Command) !void {
+        inline fn oscDispatch(self: *Self, cmd: osc.Command) !void {
             switch (cmd) {
                 .change_window_title => |title| {
                     if (@hasDecl(T, "changeWindowTitle")) {
@@ -1635,7 +1635,7 @@ pub fn Stream(comptime Handler: type) type {
             }
         }

-        fn configureCharset(
+        inline fn configureCharset(
             self: *Self,
             intermediates: []const u8,
             set: charsets.Charset,
@@ -1669,7 +1669,7 @@ pub fn Stream(comptime Handler: type) type {
             });
         }

-        fn escDispatch(
+        inline fn escDispatch(
             self: *Self,
             action: Parser.Action.ESC,
         ) !void {
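
The vtebench results are the real evidence for this change, but the effect of a single annotation can also be eyeballed with a crude micro-benchmark. This harness is a hypothetical sketch built with `zig build-exe -O ReleaseFast`, not the methodology used for the commit:

```zig
const std = @import("std");

// Toggle the `inline` keyword here and compare timings between builds.
inline fn wrapped(x: usize) usize {
    return x +% 1; // stand-in for a trivial wrapper body
}

pub fn main() !void {
    var timer = try std.time.Timer.start();
    var acc: usize = 0;
    for (0..100_000_000) |i| acc = wrapped(acc ^ i);
    // Printing acc keeps the loop from being optimized away entirely.
    std.debug.print("acc={} elapsed={d}ms\n", .{ acc, timer.read() / std.time.ns_per_ms });
}
```

A loop this tight overstates the win for any one wrapper; end-to-end throughput benchmarks like vtebench are the better judge, which is what the commit message leans on.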