Merged
131 changes: 101 additions & 30 deletions crates/stdlib/src/openssl.rs
@@ -2805,49 +2805,120 @@ mod _ssl {
let stream = self.connection.read();
let ssl_ptr = stream.ssl().as_ptr();

// Perform SSL shutdown - may need to be called twice:
// 1st call: sends close-notify, returns 0
// 2nd call: reads peer's close-notify, returns 1
let mut ret = unsafe { sys::SSL_shutdown(ssl_ptr) };

// If ret == 0, try once more to complete the bidirectional shutdown
// This handles the case where peer's close-notify is already available
if ret == 0 {
ret = unsafe { sys::SSL_shutdown(ssl_ptr) };
// BIO mode: just try shutdown once and raise SSLWantReadError if needed
if stream.is_bio() {
let ret = unsafe { sys::SSL_shutdown(ssl_ptr) };
if ret < 0 {
let err = unsafe { sys::SSL_get_error(ssl_ptr, ret) };
if err == sys::SSL_ERROR_WANT_READ {
return Err(create_ssl_want_read_error(vm).upcast());
} else if err == sys::SSL_ERROR_WANT_WRITE {
return Err(create_ssl_want_write_error(vm).upcast());
} else {
return Err(new_ssl_error(
vm,
format!("SSL shutdown failed: error code {}", err),
));
}
} else if ret == 0 {
// Sent close-notify, waiting for peer's - raise SSLWantReadError
return Err(create_ssl_want_read_error(vm).upcast());
}
return Ok(None);
}

if ret < 0 {
// Error occurred
// Socket mode: loop with select to wait for peer's close-notify
let socket_stream = stream.get_ref().expect("get_ref() failed for socket mode");
let deadline = socket_stream.timeout_deadline();

// Track how many times we've seen ret == 0 (max 2 tries)
let mut zeros = 0;

loop {
let ret = unsafe { sys::SSL_shutdown(ssl_ptr) };

// ret > 0: complete shutdown
if ret > 0 {
break;
}

// ret == 0: sent our close-notify, need to receive peer's
if ret == 0 {
zeros += 1;
if zeros > 1 {
// Already tried twice, break out (legacy behavior)
break;
}
// Wait briefly for peer's close_notify before retrying
match socket_stream.select(SslNeeds::Read, &deadline) {
SelectRet::TimedOut => {
return Err(vm.new_exception_msg(
vm.ctx.exceptions.timeout_error.to_owned(),
"The read operation timed out".to_owned(),
));
}
SelectRet::Closed => {
return Err(socket_closed_error(vm));
}
SelectRet::Nonblocking => {
// Non-blocking socket: return SSLWantReadError
return Err(create_ssl_want_read_error(vm).upcast());
}
SelectRet::Ok => {
// Data available, continue to retry
}
}
continue;
}

// ret < 0: error or would-block
let err = unsafe { sys::SSL_get_error(ssl_ptr, ret) };

if err == sys::SSL_ERROR_WANT_READ {
return Err(create_ssl_want_read_error(vm).upcast());
let needs = if err == sys::SSL_ERROR_WANT_READ {
SslNeeds::Read
} else if err == sys::SSL_ERROR_WANT_WRITE {
return Err(create_ssl_want_write_error(vm).upcast());
SslNeeds::Write
} else {
// Real error
return Err(new_ssl_error(
vm,
format!("SSL shutdown failed: error code {}", err),
));
}
} else if ret == 0 {
// Still waiting for peer's close-notify after retry
// In BIO mode, raise SSLWantReadError
if stream.is_bio() {
return Err(create_ssl_want_read_error(vm).upcast());
}
}
};

// BIO mode doesn't have an underlying socket to return
if stream.is_bio() {
return Ok(None);
// Wait on the socket
match socket_stream.select(needs, &deadline) {
SelectRet::TimedOut => {
let msg = if err == sys::SSL_ERROR_WANT_READ {
"The read operation timed out"
} else {
"The write operation timed out"
};
return Err(vm.new_exception_msg(
vm.ctx.exceptions.timeout_error.to_owned(),
msg.to_owned(),
));
}
SelectRet::Closed => {
return Err(socket_closed_error(vm));
}
SelectRet::Nonblocking => {
// Non-blocking socket, raise SSLWantReadError/SSLWantWriteError
if err == sys::SSL_ERROR_WANT_READ {
return Err(create_ssl_want_read_error(vm).upcast());
} else {
return Err(create_ssl_want_write_error(vm).upcast());
}
}
SelectRet::Ok => {
// Socket is ready, retry shutdown
continue;
}
}
}

// Return the underlying socket for socket mode
let socket = stream
.get_ref()
.expect("unwrap() called on bio mode; should only be called in socket mode");
Ok(Some(socket.0.clone()))
// Return the underlying socket
Ok(Some(socket_stream.0.clone()))
}

#[cfg(osslconf = "OPENSSL_NO_COMP")]
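A side note on the socket-mode loop in the hunk above: the control flow keys entirely off SSL_shutdown's return value plus SSL_get_error's code. The sketch below models only that decision logic, with plain integers standing in for the FFI calls; the constants mirror OpenSSL's documented error codes, and the enum names are invented for illustration, not taken from this crate.

#[derive(Debug, PartialEq)]
enum ShutdownStep {
    Done,        // ret > 0: bidirectional shutdown complete
    WaitForPeer, // ret == 0: our close-notify was sent; wait for the peer's
    WantRead,    // ret < 0 with SSL_ERROR_WANT_READ: select for readability, retry
    WantWrite,   // ret < 0 with SSL_ERROR_WANT_WRITE: select for writability, retry
    Fatal(i32),  // any other error code: raise SSLError
}

// Values match OpenSSL's SSL_ERROR_WANT_READ / SSL_ERROR_WANT_WRITE.
const SSL_ERROR_WANT_READ: i32 = 2;
const SSL_ERROR_WANT_WRITE: i32 = 3;

fn classify_shutdown(ret: i32, err: i32) -> ShutdownStep {
    if ret > 0 {
        ShutdownStep::Done
    } else if ret == 0 {
        ShutdownStep::WaitForPeer
    } else if err == SSL_ERROR_WANT_READ {
        ShutdownStep::WantRead
    } else if err == SSL_ERROR_WANT_WRITE {
        ShutdownStep::WantWrite
    } else {
        ShutdownStep::Fatal(err)
    }
}

fn main() {
    // The first SSL_shutdown call typically returns 0: close-notify sent, peer's not yet read.
    assert_eq!(classify_shutdown(0, 0), ShutdownStep::WaitForPeer);
    // A would-block read maps to SSLWantReadError or a Read select, depending on the mode.
    assert_eq!(classify_shutdown(-1, SSL_ERROR_WANT_READ), ShutdownStep::WantRead);
}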
136 changes: 103 additions & 33 deletions crates/stdlib/src/ssl.rs
@@ -4119,45 +4119,115 @@ mod _ssl {
peer_closed = true;
}
} else if let Some(timeout) = timeout_mode {
// All socket modes (blocking, timeout, non-blocking):
// Return immediately after sending our close_notify.
//
// This matches CPython/OpenSSL behavior where SSL_shutdown()
// returns after sending close_notify, allowing the app to
// close the socket without waiting for peer's close_notify.
//
// Waiting for peer's close_notify can cause deadlock with
// asyncore-based servers where both sides wait for the other's
// close_notify before closing the connection.

// Ensure all pending TLS data is sent before returning
// This prevents data loss when rustls drains its buffer
// but the socket couldn't accept all data immediately
drop(conn_guard);

// Respect socket timeout settings for flushing pending TLS data
match timeout {
Some(0.0) => {
// Non-blocking: best-effort flush, ignore errors
// to avoid deadlock with asyncore-based servers
// Non-blocking: return immediately after sending close_notify.
// Don't wait for peer's close_notify to avoid blocking.
drop(conn_guard);
// Best-effort flush; WouldBlock is expected in non-blocking mode.
// Other errors indicate close_notify may not have been sent,
// but we still complete shutdown to avoid inconsistent state.
let _ = self.flush_pending_tls_output(vm, None);
*self.shutdown_state.lock() = ShutdownState::Completed;
*self.connection.lock() = None;
return Ok(self.sock.clone());
Comment on lines 4124 to 4133

⚠️ Potential issue | 🟡 Minor

Silent error suppression may hide close_notify delivery failures.

Line 4127 uses `let _ =` to discard any error from `flush_pending_tls_output`. In non-blocking mode, if the flush fails (e.g., socket buffer full, connection reset), the close_notify may not reach the peer, and the caller has no indication of this failure.

Consider propagating the error or at minimum logging it, especially since the code immediately marks shutdown as Completed and clears the connection regardless of flush success.

Proposed fix
 Some(0.0) => {
     // Non-blocking: return immediately after sending close_notify.
     // Don't wait for peer's close_notify to avoid blocking.
     drop(conn_guard);
-    let _ = self.flush_pending_tls_output(vm, None);
+    // Best-effort flush; ignore WouldBlock but propagate other errors
+    if let Err(e) = self.flush_pending_tls_output(vm, None) {
+        if !is_blocking_io_error(&e, vm) {
+            // Non-blocking error is expected, but other errors may indicate
+            // close_notify was not sent. Still complete shutdown to avoid
+            // leaving socket in inconsistent state.
+        }
+    }
     *self.shutdown_state.lock() = ShutdownState::Completed;
     *self.connection.lock() = None;
     return Ok(self.sock.clone());
 }
🤖 Prompt for AI Agents
In `@crates/stdlib/src/ssl.rs` around lines 4124 - 4130, the code currently swallows errors from flush_pending_tls_output in the non-blocking shutdown branch (where conn_guard is dropped) using `let _ =`, which can hide failures to deliver close_notify. Change this to capture the Result from flush_pending_tls_output (i.e., self.flush_pending_tls_output(vm, None)) and, if it is an Err, either return that error to the caller or at minimum log it via the existing logger before proceeding; only set *self.shutdown_state.lock() = ShutdownState::Completed and clear *self.connection.lock() = None when the flush succeeds. Reference the flush_pending_tls_output call and the shutdown_state/connection locks to locate and update this logic.
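Note that the proposed fix above references an is_blocking_io_error helper that does not appear in this diff; treat it as a placeholder. The same distinction, expressed at the plain std::io level rather than through the VM's Python exception types, looks roughly like the sketch below (a generic illustration, not code from this PR; in the actual branch the error is a Python exception, so the check would go through the VM's BlockingIOError instead).

use std::io;

// "Try again later" errors that a best-effort, non-blocking flush may ignore;
// anything else suggests the close_notify may not have been delivered.
fn is_would_block(err: &io::Error) -> bool {
    matches!(err.kind(), io::ErrorKind::WouldBlock)
}

fn main() {
    assert!(is_would_block(&io::Error::from(io::ErrorKind::WouldBlock)));
    // e.g. a peer reset is a real failure and should at least be logged.
    assert!(!is_would_block(&io::Error::from(io::ErrorKind::ConnectionReset)));
}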

}
Some(_t) => {
// Timeout mode: use flush with socket's timeout
// Errors (including timeout) are propagated to caller
self.flush_pending_tls_output(vm, None)?;
}
None => {
// Blocking mode: wait until all pending data is sent
self.blocking_flush_all_pending(vm)?;
_ => {
// Blocking or timeout mode: wait for peer's close_notify.
// This is proper TLS shutdown - we should receive peer's
// close_notify before closing the connection.
drop(conn_guard);

// Flush our close_notify first
if timeout.is_none() {
self.blocking_flush_all_pending(vm)?;
} else {
self.flush_pending_tls_output(vm, None)?;
}

// Calculate deadline for timeout mode
let deadline = timeout.map(|t| {
std::time::Instant::now() + std::time::Duration::from_secs_f64(t)
});

// Wait for peer's close_notify
loop {
// Re-acquire connection lock for each iteration
let mut conn_guard = self.connection.lock();
let conn = match conn_guard.as_mut() {
Some(c) => c,
None => break, // Connection already closed
};

// Check if peer already sent close_notify
if self.check_peer_closed(conn, vm)? {
break;
}

drop(conn_guard);

// Check timeout
let remaining_timeout = if let Some(dl) = deadline {
let now = std::time::Instant::now();
if now >= dl {
// Timeout reached - raise TimeoutError
return Err(vm.new_exception_msg(
vm.ctx.exceptions.timeout_error.to_owned(),
"The read operation timed out".to_owned(),
));
}
Some(dl - now)
} else {
None // Blocking mode: no timeout
};

// Wait for socket to be readable
let timed_out = self.sock_wait_for_io_with_timeout(
SelectKind::Read,
remaining_timeout,
vm,
)?;

if timed_out {
// Timeout waiting for peer's close_notify
// Raise TimeoutError
return Err(vm.new_exception_msg(
vm.ctx.exceptions.timeout_error.to_owned(),
"The read operation timed out".to_owned(),
));
}

// Try to read data from socket
let mut conn_guard = self.connection.lock();
let conn = match conn_guard.as_mut() {
Some(c) => c,
None => break,
};

// Read and process any incoming TLS data
match self.try_read_close_notify(conn, vm) {
Ok(closed) => {
if closed {
break;
}
// Check again after processing
if self.check_peer_closed(conn, vm)? {
break;
}
}
Err(_) => {
// Socket error - peer likely closed connection
break;
}
}
}

// Shutdown complete
*self.shutdown_state.lock() = ShutdownState::Completed;
*self.connection.lock() = None;
return Ok(self.sock.clone());
}
}

// Set shutdown_state first to ensure atomic visibility
// This prevents read/write race conditions during shutdown
*self.shutdown_state.lock() = ShutdownState::Completed;
*self.connection.lock() = None;
return Ok(self.sock.clone());
}

// Step 3: Check again if peer has sent close_notify (non-blocking/BIO mode only)
44 changes: 44 additions & 0 deletions crates/vm/src/stdlib/thread.rs
@@ -936,6 +936,50 @@ pub(crate) mod _thread {
true
});
}

// Clean up shutdown_handles as well.
// This is critical to prevent _shutdown() from waiting on threads
// that don't exist in the child process after fork.
if let Some(mut handles) = vm.state.shutdown_handles.try_lock() {
// Mark all non-current threads as done in shutdown_handles
handles.retain(|(inner_weak, done_event_weak): &ShutdownEntry| {
let Some(inner) = inner_weak.upgrade() else {
return false; // Remove dead entries
};
let Some(done_event) = done_event_weak.upgrade() else {
return false;
};

// Try to lock the inner state - skip if we can't
let Some(mut inner_guard) = inner.try_lock() else {
return false;
};

// Skip current thread
if inner_guard.ident == current_ident {
return true;
}

// Keep handles for threads that have not been started yet.
// They are safe to start in the child process.
if inner_guard.state == ThreadHandleState::NotStarted {
return true;
}

// Mark as done so _shutdown() won't wait on it
inner_guard.state = ThreadHandleState::Done;
drop(inner_guard);

// Notify waiters
let (lock, cvar) = &*done_event;
if let Some(mut done) = lock.try_lock() {
*done = true;
cvar.notify_all();
}

false // Remove from shutdown_handles - these threads don't exist in child
});
}
}

// Thread handle state enum
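The done_event handling at the end of the new retain closure is the standard (Mutex<bool>, Condvar) completion-event pattern: set the flag, then wake every waiter so _shutdown() stops blocking on a thread that no longer exists in the child process. A self-contained illustration using std's types (the PR appears to use parking_lot-style try_lock, so the locking calls differ slightly):

use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    // Shared "done" event: waiters block on the condvar until the flag flips.
    let done_event = Arc::new((Mutex::new(false), Condvar::new()));

    let waiter = {
        let done_event = Arc::clone(&done_event);
        thread::spawn(move || {
            let (lock, cvar) = &*done_event;
            let mut done = lock.lock().unwrap();
            while !*done {
                done = cvar.wait(done).unwrap();
            }
            // The thread handle is considered finished once the flag is set.
        })
    };

    // Notifier side: mirrors the fork cleanup above, which marks the handle
    // Done and wakes anything blocked waiting on it.
    {
        let (lock, cvar) = &*done_event;
        *lock.lock().unwrap() = true;
        cvar.notify_all();
    }

    waiter.join().unwrap();
}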