allow ast-check and build on save to be executed in parallel
Techatrix committed Mar 2, 2024
1 parent 80ddf7b commit dd7b88e
Showing 2 changed files with 24 additions and 21 deletions.
16 changes: 12 additions & 4 deletions src/Server.zig
@@ -50,7 +50,11 @@ wait_group: if (zig_builtin.single_threaded) void else std.Thread.WaitGroup,
 job_queue: std.fifo.LinearFifo(Job, .Dynamic),
 job_queue_lock: std.Thread.Mutex = .{},
 ip: InternPool = .{},
-zig_exe_lock: std.Thread.Mutex = .{},
+// ensure that build on save is only executed once at a time
+running_build_on_save_processes: std.atomic.Value(usize) = std.atomic.Value(usize).init(0),
+/// avoid Zig deadlocking when spawning multiple `zig ast-check` processes at the same time.
+/// See https://github.com/ziglang/zig/issues/16369
+zig_ast_check_lock: std.Thread.Mutex = .{},
 config_arena: std.heap.ArenaAllocator.State = .{},
 client_capabilities: ClientCapabilities = .{},
 runtime_zig_version: ?ZigVersionWrapper = null,
@@ -2047,19 +2051,23 @@ fn processJob(server: *Server, job: Job, wait_group: ?*std.Thread.WaitGroup) void {
             server.allocator.free(json_message);
         },
         .run_build_on_save => {
-            std.debug.assert(std.process.can_spawn);
-            if (!std.process.can_spawn) return;
+            if (!std.process.can_spawn) unreachable;
+
+            if (server.running_build_on_save_processes.load(.SeqCst) != 0) return;

             for (server.client_capabilities.workspace_folders) |workspace_folder_uri| {
+                _ = server.running_build_on_save_processes.fetchAdd(1, .AcqRel);
+                defer _ = server.running_build_on_save_processes.fetchSub(1, .AcqRel);
+
                 var arena_allocator = std.heap.ArenaAllocator.init(server.allocator);
                 defer arena_allocator.deinit();
                 var diagnostic_set = std.StringArrayHashMapUnmanaged(std.ArrayListUnmanaged(types.Diagnostic)){};
                 diagnostics_gen.generateBuildOnSaveDiagnostics(server, workspace_folder_uri, arena_allocator.allocator(), &diagnostic_set) catch |err| {
                     log.err("failed to run build on save on {s}: {}", .{ workspace_folder_uri, err });
                     return;
                 };

                 for (diagnostic_set.keys(), diagnostic_set.values()) |document_uri, diagnostics| {
                     if (diagnostics.items.len == 0) continue;
                     const json_message = server.sendToClientNotification("textDocument/publishDiagnostics", .{
                         .uri = document_uri,
                         .diagnostics = diagnostics.items,
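
The Server.zig side of the change splits the old shared `zig_exe_lock` into two independent mechanisms: build on save is guarded by an atomic counter that simply skips a request while another build is still in flight, and `zig ast-check` keeps its own dedicated mutex (see the second file below). A minimal standalone sketch of the counter guard, assuming the same `std.atomic.Value` API as the diff (newer Zig spells the orderings `.seq_cst`/`.acq_rel`); `runBuildOnSave` is a hypothetical stand-in for the job handler above:

const std = @import("std");

var running_build_on_save_processes: std.atomic.Value(usize) = std.atomic.Value(usize).init(0);

fn runBuildOnSave() void {
    // Skip this request if a build on save is already in flight,
    // rather than queueing a second concurrent `zig build`.
    if (running_build_on_save_processes.load(.SeqCst) != 0) return;

    _ = running_build_on_save_processes.fetchAdd(1, .AcqRel);
    defer _ = running_build_on_save_processes.fetchSub(1, .AcqRel);

    // ... spawn `zig build` and publish the resulting diagnostics ...
}

pub fn main() void {
    runBuildOnSave();
}

In the actual handler the increment happens per workspace folder inside the loop, so a single pass still covers every folder while any request arriving mid-build bails out early at the load check.
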
29 changes: 12 additions & 17 deletions src/features/diagnostics.zig
@@ -251,21 +251,16 @@ pub fn generateBuildOnSaveDiagnostics(
         }
     }

-    const result = blk: {
-        server.zig_exe_lock.lock();
-        defer server.zig_exe_lock.unlock();
-
-        break :blk std.process.Child.run(.{
-            .allocator = server.allocator,
-            .argv = argv.items,
-            .cwd = workspace_path,
-            .max_output_bytes = 1024 * 1024,
-        }) catch |err| {
-            const joined = std.mem.join(server.allocator, " ", argv.items) catch return;
-            defer server.allocator.free(joined);
-            log.err("failed zig build command:\n{s}\nerror:{}\n", .{ joined, err });
-            return err;
-        };
+    const result = std.process.Child.run(.{
+        .allocator = server.allocator,
+        .argv = argv.items,
+        .cwd = workspace_path,
+        .max_output_bytes = 1024 * 1024,
+    }) catch |err| {
+        const joined = std.mem.join(server.allocator, " ", argv.items) catch return;
+        defer server.allocator.free(joined);
+        log.err("failed zig build command:\n{s}\nerror:{}\n", .{ joined, err });
+        return err;
     };
     defer server.allocator.free(result.stdout);
     defer server.allocator.free(result.stderr);
@@ -393,8 +388,8 @@ fn getDiagnosticsFromAstCheck(
     const zig_exe_path = server.config.zig_exe_path.?;

     const stderr_bytes = blk: {
-        server.zig_exe_lock.lock();
-        defer server.zig_exe_lock.unlock();
+        server.zig_ast_check_lock.lock();
+        defer server.zig_ast_check_lock.unlock();

         var process = std.process.Child.init(&[_][]const u8{ zig_exe_path, "ast-check", "--color", "off" }, server.allocator);
         process.stdin_behavior = .Pipe;
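
With `zig build` no longer behind a lock, only `zig ast-check` stays serialized, because spawning several ast-check processes at once can deadlock Zig (ziglang/zig#16369). A standalone sketch of that narrower lock, assuming `zig` is on PATH and passing the file as an argument where ZLS pipes the document over stdin; `runAstCheck` and the example path are hypothetical:

const std = @import("std");

var zig_ast_check_lock: std.Thread.Mutex = .{};

/// Runs `zig ast-check` on one file and returns its stderr, which
/// carries the diagnostics. Caller owns the returned slice.
fn runAstCheck(allocator: std.mem.Allocator, file_path: []const u8) ![]u8 {
    // Serialize only the child-process spawn, working around
    // https://github.com/ziglang/zig/issues/16369
    zig_ast_check_lock.lock();
    defer zig_ast_check_lock.unlock();

    const result = try std.process.Child.run(.{
        .allocator = allocator,
        .argv = &.{ "zig", "ast-check", "--color", "off", file_path },
        .max_output_bytes = 1024 * 1024,
    });
    allocator.free(result.stdout);
    return result.stderr;
}

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    const stderr_bytes = try runAstCheck(allocator, "src/main.zig");
    defer allocator.free(stderr_bytes);
    std.debug.print("{s}", .{stderr_bytes});
}

Scoping the mutex to the spawn keeps the workaround cheap: ast-check requests still queue behind one another, but they no longer contend with build-on-save runs for the same lock.
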
