Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package aws-c-io for openSUSE:Factory checked in at 2024-06-06 12:34:24
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/aws-c-io (Old)
and /work/SRC/openSUSE:Factory/.aws-c-io.new.24587 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "aws-c-io"
Thu Jun 6 12:34:24 2024 rev:7 rq:1178905 version:0.14.9
Changes:
--------
--- /work/SRC/openSUSE:Factory/aws-c-io/aws-c-io.changes 2024-05-16 17:15:40.909023698 +0200
+++ /work/SRC/openSUSE:Factory/.aws-c-io.new.24587/aws-c-io.changes 2024-06-06 12:35:01.617740112 +0200
@@ -1,0 +2,12 @@
+Wed Jun 5 08:50:30 UTC 2024 - John Paul Adrian Glaubitz
+
+- Update to version 0.14.9
+ * Fix tests that require a valid cert to use add_net_test_case
+ by @waahm7 in (#637)
+ * Fix signed/unsigned bug with aws_future_wait() timeout value
+ by @graebm in (#638)
+ * Adds Host Resolver IPv6 variations test by @waahm7 in (#639)
+ * Fix bug where last few bytes on socket go unread
+ by @graebm in (#642)
+
+-------------------------------------------------------------------
Old:
----
v0.14.8.tar.gz
New:
----
v0.14.9.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ aws-c-io.spec ++++++
--- /var/tmp/diff_new_pack.T3k2Mf/_old 2024-06-06 12:35:03.365803815 +0200
+++ /var/tmp/diff_new_pack.T3k2Mf/_new 2024-06-06 12:35:03.373804107 +0200
@@ -20,7 +20,7 @@
%define library_version 1.0.0
%define library_soversion 0unstable
Name: aws-c-io
-Version: 0.14.8
+Version: 0.14.9
Release: 0
Summary: I/O and TLS package AWS SDK for C
License: Apache-2.0
++++++ v0.14.8.tar.gz -> v0.14.9.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/aws-c-io-0.14.8/source/future.c new/aws-c-io-0.14.9/source/future.c
--- old/aws-c-io-0.14.8/source/future.c 2024-05-04 21:28:06.000000000 +0200
+++ new/aws-c-io-0.14.9/source/future.c 2024-06-04 18:27:58.000000000 +0200
@@ -453,13 +453,16 @@
/* this function is conceptually const, but we need to use synchronization primitives */
struct aws_future_impl *mutable_future = (struct aws_future_impl *)future;
+ /* condition-variable takes signed timeout, so clamp to INT64_MAX (292+ years) */
+ int64_t timeout_i64 = aws_min_u64(timeout_ns, INT64_MAX);
+
/* BEGIN CRITICAL SECTION */
aws_mutex_lock(&mutable_future->lock);
bool is_done = aws_condition_variable_wait_for_pred(
&mutable_future->wait_cvar,
&mutable_future->lock,
- (int64_t)timeout_ns,
+ timeout_i64,
s_future_impl_is_done_pred,
mutable_future) == AWS_OP_SUCCESS;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/aws-c-io-0.14.8/source/posix/socket.c new/aws-c-io-0.14.9/source/posix/socket.c
--- old/aws-c-io-0.14.8/source/posix/socket.c 2024-05-04 21:28:06.000000000 +0200
+++ new/aws-c-io-0.14.9/source/posix/socket.c 2024-06-04 18:27:58.000000000 +0200
@@ -1668,6 +1668,23 @@
* subscribed is set to false. */
aws_ref_count_acquire(&socket_impl->internal_refcount);
+ /* NOTE: READABLE|WRITABLE|HANG_UP events might arrive simultaneously
+ * (e.g. peer sends last few bytes and immediately hangs up).
+ * Notify user of READABLE|WRITABLE events first, so they try to read any remaining bytes. */
+
+ if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_READABLE) {
+ AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is readable", (void *)socket, socket->io_handle.data.fd);
+ if (socket->readable_fn) {
+ socket->readable_fn(socket, AWS_OP_SUCCESS, socket->readable_user_data);
+ }
+ }
+ /* if socket closed in between these branches, the currently_subscribed will be false and socket_impl will not
+ * have been cleaned up, so this next branch is safe. */
+ if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_WRITABLE) {
+ AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is writable", (void *)socket, socket->io_handle.data.fd);
+ s_process_socket_write_requests(socket, NULL);
+ }
+
if (events & AWS_IO_EVENT_TYPE_REMOTE_HANG_UP || events & AWS_IO_EVENT_TYPE_CLOSED) {
aws_raise_error(AWS_IO_SOCKET_CLOSED);
AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: closed remotely", (void *)socket, socket->io_handle.data.fd);
@@ -1688,19 +1705,6 @@
goto end_check;
}
- if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_READABLE) {
- AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is readable", (void *)socket, socket->io_handle.data.fd);
- if (socket->readable_fn) {
- socket->readable_fn(socket, AWS_OP_SUCCESS, socket->readable_user_data);
- }
- }
- /* if socket closed in between these branches, the currently_subscribed will be false and socket_impl will not
- * have been cleaned up, so this next branch is safe. */
- if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_WRITABLE) {
- AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is writable", (void *)socket, socket->io_handle.data.fd);
- s_process_socket_write_requests(socket, NULL);
- }
-
end_check:
aws_ref_count_release(&socket_impl->internal_refcount);
}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/aws-c-io-0.14.8/source/socket_channel_handler.c new/aws-c-io-0.14.9/source/socket_channel_handler.c
--- old/aws-c-io-0.14.8/source/socket_channel_handler.c 2024-05-04 21:28:06.000000000 +0200
+++ new/aws-c-io-0.14.9/source/socket_channel_handler.c 2024-06-04 18:27:58.000000000 +0200
@@ -122,6 +122,10 @@
*/
static void s_do_read(struct socket_handler *socket_handler) {
+ if (socket_handler->shutdown_in_progress) {
+ return;
+ }
+
size_t downstream_window = aws_channel_slot_downstream_read_window(socket_handler->slot);
size_t max_to_read =
downstream_window > socket_handler->max_rw_size ? socket_handler->max_rw_size : downstream_window;
@@ -139,17 +143,20 @@
size_t total_read = 0;
size_t read = 0;
- while (total_read < max_to_read && !socket_handler->shutdown_in_progress) {
+ int last_error = 0;
+ while (total_read < max_to_read) {
size_t iter_max_read = max_to_read - total_read;
struct aws_io_message *message = aws_channel_acquire_message_from_pool(
socket_handler->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, iter_max_read);
if (!message) {
+ last_error = aws_last_error();
break;
}
if (aws_socket_read(socket_handler->socket, &message->message_data, &read)) {
+ last_error = aws_last_error();
aws_mem_release(message->allocator, message);
break;
}
@@ -162,6 +169,7 @@
(unsigned long long)read);
if (aws_channel_slot_send_message(socket_handler->slot, message, AWS_CHANNEL_DIR_READ)) {
+ last_error = aws_last_error();
aws_mem_release(message->allocator, message);
break;
}
@@ -170,30 +178,29 @@
AWS_LOGF_TRACE(
AWS_LS_IO_SOCKET_HANDLER,
"id=%p: total read on this tick %llu",
- (void *)&socket_handler->slot->handler,
+ (void *)socket_handler->slot->handler,
(unsigned long long)total_read);
socket_handler->stats.bytes_read += total_read;
/* resubscribe as long as there's no error, just return if we're in a would block scenario. */
if (total_read < max_to_read) {
- int last_error = aws_last_error();
+ AWS_ASSERT(last_error != 0);
- if (last_error != AWS_IO_READ_WOULD_BLOCK && !socket_handler->shutdown_in_progress) {
+ if (last_error != AWS_IO_READ_WOULD_BLOCK) {
aws_channel_shutdown(socket_handler->slot->channel, last_error);
+ } else {
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_SOCKET_HANDLER,
+ "id=%p: out of data to read on socket. "
+ "Waiting on event-loop notification.",
+ (void *)socket_handler->slot->handler);
}
-
- AWS_LOGF_TRACE(
- AWS_LS_IO_SOCKET_HANDLER,
- "id=%p: out of data to read on socket. "
- "Waiting on event-loop notification.",
- (void *)socket_handler->slot->handler);
return;
}
/* in this case, everything was fine, but there's still pending reads. We need to schedule a task to do the read
* again. */
- if (!socket_handler->shutdown_in_progress && total_read == socket_handler->max_rw_size &&
- !socket_handler->read_task_storage.task_fn) {
+ if (total_read == socket_handler->max_rw_size && !socket_handler->read_task_storage.task_fn) {
AWS_LOGF_TRACE(
AWS_LS_IO_SOCKET_HANDLER,
@@ -212,17 +219,29 @@
(void)socket;
struct socket_handler *socket_handler = user_data;
- AWS_LOGF_TRACE(AWS_LS_IO_SOCKET_HANDLER, "id=%p: socket is now readable", (void *)socket_handler->slot->handler);
+ AWS_LOGF_TRACE(
+ AWS_LS_IO_SOCKET_HANDLER,
+ "id=%p: socket on-readable with error code %d(%s)",
+ (void *)socket_handler->slot->handler,
+ error_code,
+ aws_error_name(error_code));
- /* read regardless so we can pick up data that was sent prior to the close. For example, peer sends a TLS ALERT
- * then immediately closes the socket. On some platforms, we'll never see the readable flag. So we want to make
+ /* Regardless of error code call read() until it reports error or EOF,
+ * so we can pick up data that was sent prior to the close.
+ *
+ * For example, if peer closes the socket immediately after sending the last
+ * bytes of data, the READABLE and HANGUP events arrive simultaneously.
+ *
+ * Another example, peer sends a TLS ALERT then immediately closes the socket.
+ * On some platforms, we'll never see the readable flag. So we want to make
* sure we read the ALERT, otherwise, we'll end up telling the user that the channel shutdown because of a socket
- * closure, when in reality it was a TLS error */
+ * closure, when in reality it was a TLS error
+ *
+ * It may take more than one read() to get all remaining data.
+ * Also, if the downstream read-window reaches 0, we need to patiently
+ * wait until the window opens before we can call read() again. */
+ (void)error_code;
s_do_read(socket_handler);
-
- if (error_code && !socket_handler->shutdown_in_progress) {
- aws_channel_shutdown(socket_handler->slot->channel, error_code);
- }
}
/* Either the result of a context switch (for fairness in the event loop), or a window update. */
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/aws-c-io-0.14.8/source/windows/iocp/socket.c new/aws-c-io-0.14.9/source/windows/iocp/socket.c
--- old/aws-c-io-0.14.8/source/windows/iocp/socket.c 2024-05-04 21:28:06.000000000 +0200
+++ new/aws-c-io-0.14.9/source/windows/iocp/socket.c 2024-06-04 18:27:58.000000000 +0200
@@ -636,6 +636,7 @@
case IO_STATUS_TIMEOUT:
return AWS_IO_SOCKET_TIMEOUT;
case IO_PIPE_BROKEN:
+ case ERROR_BROKEN_PIPE:
return AWS_IO_SOCKET_CLOSED;
case STATUS_INVALID_ADDRESS_COMPONENT:
case WSAEADDRNOTAVAIL:
@@ -2970,7 +2971,7 @@
AWS_LOGF_ERROR(
AWS_LS_IO_SOCKET,
- "id=%p handle=%p: ReadFile() failed with error %d",
+ "id=%p handle=%p: recv() failed with error %d",
(void *)socket,
(void *)socket->io_handle.data.handle,
error);
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/aws-c-io-0.14.8/tests/CMakeLists.txt new/aws-c-io-0.14.9/tests/CMakeLists.txt
--- old/aws-c-io-0.14.8/tests/CMakeLists.txt 2024-05-04 21:28:06.000000000 +0200
+++ new/aws-c-io-0.14.9/tests/CMakeLists.txt 2024-06-04 18:27:58.000000000 +0200
@@ -94,6 +94,7 @@
add_net_test_case(channel_connect_some_hosts_timeout)
add_net_test_case(test_default_with_ipv6_lookup)
+add_net_test_case(test_default_host_resolver_ipv6_address_variations)
add_test_case(test_resolver_ipv6_address_lookup)
add_net_test_case(test_default_with_multiple_lookups)
add_test_case(test_resolver_ipv4_address_lookup)
@@ -122,6 +123,28 @@
add_test_case(socket_handler_echo_and_backpressure)
add_test_case(socket_handler_close)
+# These tests fail on Windows due to some bug in our server code where, if the socket is closed
+# immediately after data is written, that data does not flush cleanly to the client.
+# I've lost days to this bug, and no one is using our Windows server functionality,
+# so disabling these tests on Windows and moving along for now.
+# I tried the following:
+# 1) Wrote 2 simple standalone Windows programs, server and client, using simple synchronous socket code.
+# WORKED PERFECTLY. So it's not a fundamental issue with Windows.
+# 2) Commented out server part of this failing test, and used the simple standalone server instead.
+# WORKED PERFECTLY. So it's not a problem with our actual client code.
+# 3) Copy/pasted the simple standalone server code into this test, and used that instead of our actual server code.
+# WORKED PERFECTLY. So it's not a problem with the server and client sockets being in the same process.
+# 4) Commented out the client part of this failing test, and used the simple standalone client instead.
+# FAILED. The standalone client got WSAECONNRESET (Connection reset by peer) before receiving all the data.
+# So it's something with our complicated non-blocking server code.
+# The last interesting thing I noticed before giving up was: we call shutdown() immediately
+# before calling closesocket() but shutdown() gets error WSAENOTCONN, even
+# though, at that moment, the socket should be connected just fine.
+if(NOT WIN32)
+ add_net_test_case(socket_handler_read_to_eof_after_peer_hangup)
+ add_net_test_case(socket_handler_ipv4_read_to_eof_after_peer_hangup)
+ add_net_test_case(socket_handler_ipv6_read_to_eof_after_peer_hangup)
+endif()
add_test_case(socket_pinned_event_loop)
add_net_test_case(socket_pinned_event_loop_dns_failure)
@@ -209,7 +232,7 @@
# Misc non-badssl tls tests
add_net_test_case(test_concurrent_cert_import)
add_net_test_case(test_duplicate_cert_import)
- add_test_case(tls_channel_echo_and_backpressure_test)
+ add_net_test_case(tls_channel_echo_and_backpressure_test)
add_net_test_case(tls_client_channel_negotiation_error_socket_closed)
add_net_test_case(tls_client_channel_negotiation_success)
add_net_test_case(tls_server_multiple_connections)
@@ -222,8 +245,8 @@
add_test_case(alpn_error_creating_handler)
add_test_case(tls_destroy_null_context)
- add_test_case(tls_channel_statistics_test)
- add_test_case(tls_certificate_chain_test)
+ add_net_test_case(tls_channel_statistics_test)
+ add_net_test_case(tls_certificate_chain_test)
else()
add_test_case(byo_tls_handler_test)
endif()
@@ -238,6 +261,7 @@
add_test_case(future_register_event_loop_callback_always_scheduled)
add_test_case(future_register_channel_callback)
add_test_case(future_wait_timeout)
+add_test_case(future_wait_timeout_max)
add_test_case(future_pointer_with_destroy)
add_test_case(future_pointer_with_release)
add_test_case(future_get_result_by_move)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/aws-c-io-0.14.8/tests/default_host_resolver_test.c new/aws-c-io-0.14.9/tests/default_host_resolver_test.c
--- old/aws-c-io-0.14.8/tests/default_host_resolver_test.c 2024-05-04 21:28:06.000000000 +0200
+++ new/aws-c-io-0.14.9/tests/default_host_resolver_test.c 2024-06-04 18:27:58.000000000 +0200
@@ -160,6 +160,103 @@
AWS_TEST_CASE(test_default_with_ipv6_lookup, s_test_default_with_ipv6_lookup_fn)
+static int s_test_default_host_resolver_ipv6_address_variations_fn(struct aws_allocator *allocator, void *ctx) {
+ (void)ctx;
+
+ aws_io_library_init(allocator);
+
+ const struct test_case {
+ const char *ip_address;
+ const char *expected_resolved_ip_address;
+ } test_cases[] = {
+ /* simple full uri*/
+ {
+ .ip_address = "0:0::1",
+ .expected_resolved_ip_address = "::1",
+ },
+ {
+ .ip_address = "::1",
+ .expected_resolved_ip_address = "::1",
+ },
+ {
+ .ip_address = "0:0:0:0:0:0:0:1",
+ .expected_resolved_ip_address = "::1",
+ },
+ {
+ .ip_address = "fd00:ec2:0:0:0:0:0:23",
+ .expected_resolved_ip_address = "fd00:ec2::23",
+ },
+
+ };
+
+ struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+
+ struct aws_host_resolver_default_options resolver_options = {
+ .el_group = el_group,
+ .max_entries = 10,
+ };
+ struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options);
+
+ struct aws_host_resolution_config config = {
+ .max_ttl = 10,
+ .impl = aws_default_dns_resolve,
+ .impl_data = NULL,
+ };
+
+ struct aws_mutex mutex = AWS_MUTEX_INIT;
+ struct default_host_callback_data callback_data = {
+ .condition_variable = AWS_CONDITION_VARIABLE_INIT,
+ .invoked = false,
+ .has_aaaa_address = false,
+ .has_a_address = false,
+ .mutex = &mutex,
+ };
+
+ for (size_t case_idx = 0; case_idx < AWS_ARRAY_SIZE(test_cases); ++case_idx) {
+ struct test_case case_i = test_cases[case_idx];
+ printf(
+ "CASE[%zu]: ip_address=%s expected_resolved_ip_address=%s\n, ",
+ case_idx,
+ case_i.ip_address,
+ case_i.expected_resolved_ip_address);
+ struct aws_string *address = aws_string_new_from_c_str(allocator, case_i.ip_address);
+ struct aws_string *expected_address = aws_string_new_from_c_str(allocator, case_i.expected_resolved_ip_address);
+
+ ASSERT_SUCCESS(aws_host_resolver_resolve_host(
+ resolver, address, s_default_host_resolved_test_callback, &config, &callback_data));
+
+ ASSERT_SUCCESS(aws_mutex_lock(&mutex));
+ aws_condition_variable_wait_pred(
+ &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data);
+
+ callback_data.invoked = false;
+ ASSERT_TRUE(callback_data.has_aaaa_address);
+ ASSERT_INT_EQUALS(AWS_ADDRESS_RECORD_TYPE_AAAA, callback_data.aaaa_address.record_type);
+ ASSERT_BIN_ARRAYS_EQUALS(
+ aws_string_bytes(expected_address),
+ expected_address->len,
+ aws_string_bytes(callback_data.aaaa_address.address),
+ callback_data.aaaa_address.address->len);
+
+ aws_host_address_clean_up(&callback_data.aaaa_address);
+ aws_host_address_clean_up(&callback_data.a_address);
+ ASSERT_SUCCESS(aws_mutex_unlock(&mutex));
+ aws_string_destroy(address);
+ aws_string_destroy(expected_address);
+ }
+
+ aws_host_resolver_release(resolver);
+ aws_event_loop_group_release(el_group);
+
+ aws_io_library_clean_up();
+
+ return 0;
+}
+
+AWS_TEST_CASE(
+ test_default_host_resolver_ipv6_address_variations,
+ s_test_default_host_resolver_ipv6_address_variations_fn)
+
/* just FYI, this test assumes that "s3.us-east-1.amazonaws.com" does not return IPv6 addresses. */
static int s_test_default_with_ipv4_only_lookup_fn(struct aws_allocator *allocator, void *ctx) {
(void)ctx;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/aws-c-io-0.14.8/tests/future_test.c new/aws-c-io-0.14.9/tests/future_test.c
--- old/aws-c-io-0.14.8/tests/future_test.c 2024-05-04 21:28:06.000000000 +0200
+++ new/aws-c-io-0.14.9/tests/future_test.c 2024-06-04 18:27:58.000000000 +0200
@@ -440,6 +440,29 @@
}
AWS_TEST_CASE(future_wait_timeout, s_test_future_wait_timeout)
+/* This is a regression test */
+static int s_test_future_wait_timeout_max(struct aws_allocator *alloc, void *ctx) {
+ (void)ctx;
+ aws_io_library_init(alloc);
+
+ /* Thread will complete the future in 1sec */
+ struct aws_future_size *future = s_start_thread_job(alloc, ONE_SEC_IN_NS);
+
+ /* Wait for future to complete, with timeout of UINT64_MAX.
+ * Once upon a time, there was a bug where this became a negative number and immediately timed out. */
+ bool completed_before_timeout = aws_future_size_wait(future, UINT64_MAX);
+ ASSERT_TRUE(completed_before_timeout);
+
+ /* Wait until other thread joins, at which point the future is complete and the callback has fired */
+ aws_thread_set_managed_join_timeout_ns(MAX_TIMEOUT_NS);
+ ASSERT_SUCCESS(aws_thread_join_all_managed());
+
+ aws_future_size_release(future);
+ aws_io_library_clean_up();
+ return 0;
+}
+AWS_TEST_CASE(future_wait_timeout_max, s_test_future_wait_timeout_max)
+
struct aws_destroyme {
struct aws_allocator *alloc;
bool *set_true_on_death;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/aws-c-io-0.14.8/tests/read_write_test_handler.c new/aws-c-io-0.14.9/tests/read_write_test_handler.c
--- old/aws-c-io-0.14.8/tests/read_write_test_handler.c 2024-05-04 21:28:06.000000000 +0200
+++ new/aws-c-io-0.14.9/tests/read_write_test_handler.c 2024-06-04 18:27:58.000000000 +0200
@@ -190,42 +190,60 @@
struct aws_channel_slot *slot;
struct aws_byte_buf *buffer;
struct aws_channel_task task;
+ aws_channel_on_message_write_completed_fn *on_completion;
+ void *user_data;
};
-static void s_rw_handler_write_task(struct aws_channel_task *task, void *arg, enum aws_task_status task_status) {
- (void)task;
- (void)task_status;
- struct rw_handler_write_task_args *write_task_args = arg;
+static void s_rw_handler_write_now(
+ struct aws_channel_slot *slot,
+ struct aws_byte_buf *buffer,
+ aws_channel_on_message_write_completed_fn *on_completion,
+ void *user_data) {
+
+ struct aws_io_message *msg =
+ aws_channel_acquire_message_from_pool(slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, buffer->len);
- struct aws_io_message *msg = aws_channel_acquire_message_from_pool(
- write_task_args->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, write_task_args->buffer->len);
+ msg->on_completion = on_completion;
+ msg->user_data = user_data;
- struct aws_byte_cursor write_buffer = aws_byte_cursor_from_buf(write_task_args->buffer);
- aws_byte_buf_append(&msg->message_data, &write_buffer);
+ struct aws_byte_cursor write_buffer = aws_byte_cursor_from_buf(buffer);
+ AWS_FATAL_ASSERT(aws_byte_buf_append(&msg->message_data, &write_buffer) == AWS_OP_SUCCESS);
- aws_channel_slot_send_message(write_task_args->slot, msg, AWS_CHANNEL_DIR_WRITE);
+ AWS_FATAL_ASSERT(aws_channel_slot_send_message(slot, msg, AWS_CHANNEL_DIR_WRITE) == AWS_OP_SUCCESS);
+}
+static void s_rw_handler_write_task(struct aws_channel_task *task, void *arg, enum aws_task_status task_status) {
+ (void)task;
+ (void)task_status;
+ struct rw_handler_write_task_args *write_task_args = arg;
+ s_rw_handler_write_now(
+ write_task_args->slot, write_task_args->buffer, write_task_args->on_completion, write_task_args->user_data);
aws_mem_release(write_task_args->handler->alloc, write_task_args);
}
void rw_handler_write(struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *buffer) {
+ rw_handler_write_with_callback(handler, slot, buffer, NULL /*on_completion*/, NULL /*user_data*/);
+}
+
+void rw_handler_write_with_callback(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_byte_buf *buffer,
+ aws_channel_on_message_write_completed_fn *on_completion,
+ void *user_data) {
struct rw_test_handler_impl *handler_impl = handler->impl;
if (!handler_impl->event_loop_driven || aws_channel_thread_is_callers_thread(slot->channel)) {
- struct aws_io_message *msg =
- aws_channel_acquire_message_from_pool(slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, buffer->len);
-
- struct aws_byte_cursor write_buffer = aws_byte_cursor_from_buf(buffer);
- aws_byte_buf_append(&msg->message_data, &write_buffer);
-
- aws_channel_slot_send_message(slot, msg, AWS_CHANNEL_DIR_WRITE);
+ s_rw_handler_write_now(slot, buffer, on_completion, user_data);
} else {
struct rw_handler_write_task_args *write_task_args =
aws_mem_acquire(handler->alloc, sizeof(struct rw_handler_write_task_args));
write_task_args->handler = handler;
write_task_args->buffer = buffer;
write_task_args->slot = slot;
+ write_task_args->on_completion = on_completion;
+ write_task_args->user_data = user_data;
aws_channel_task_init(&write_task_args->task, s_rw_handler_write_task, write_task_args, "rw_handler_write");
aws_channel_schedule_task_now(slot->channel, &write_task_args->task);
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/aws-c-io-0.14.8/tests/read_write_test_handler.h new/aws-c-io-0.14.9/tests/read_write_test_handler.h
--- old/aws-c-io-0.14.8/tests/read_write_test_handler.h 2024-05-04 21:28:06.000000000 +0200
+++ new/aws-c-io-0.14.9/tests/read_write_test_handler.h 2024-06-04 18:27:58.000000000 +0200
@@ -36,6 +36,13 @@
void rw_handler_write(struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *buffer);
+void rw_handler_write_with_callback(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_byte_buf *buffer,
+ aws_channel_on_message_write_completed_fn *on_completion,
+ void *user_data);
+
void rw_handler_trigger_read(struct aws_channel_handler *handler, struct aws_channel_slot *slot);
bool rw_handler_shutdown_called(struct aws_channel_handler *handler);
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/aws-c-io-0.14.8/tests/socket_handler_test.c new/aws-c-io-0.14.9/tests/socket_handler_test.c
--- old/aws-c-io-0.14.8/tests/socket_handler_test.c 2024-05-04 21:28:06.000000000 +0200
+++ new/aws-c-io-0.14.9/tests/socket_handler_test.c 2024-06-04 18:27:58.000000000 +0200
@@ -17,6 +17,13 @@
#include "statistics_handler_test.h"
#include
+#ifdef _MSC_VER
+# pragma warning(disable : 4996) /* allow strncpy() */
+#endif
+
+#define NANOS_PER_SEC ((uint64_t)AWS_TIMESTAMP_NANOS)
+#define TIMEOUT (10 * NANOS_PER_SEC)
+
struct socket_test_args {
struct aws_allocator *allocator;
struct aws_mutex *mutex;
@@ -24,7 +31,7 @@
struct aws_channel *channel;
struct aws_channel_handler *rw_handler;
- struct aws_atomic_var rw_slot; /* pointer-to struct aws_channel_slot */
+ struct aws_channel_slot *rw_slot;
int error_code;
bool shutdown_invoked;
bool error_invoked;
@@ -37,6 +44,7 @@
struct aws_mutex mutex;
struct aws_condition_variable condition_variable;
struct aws_event_loop_group *el_group;
+ struct aws_host_resolver *resolver;
struct aws_atomic_var current_time_ns;
struct aws_atomic_var stats_handler;
@@ -52,6 +60,13 @@
aws_io_library_init(allocator);
tester->el_group = aws_event_loop_group_new_default(allocator, 0, NULL);
+
+ struct aws_host_resolver_default_options resolver_options = {
+ .el_group = tester->el_group,
+ .max_entries = 8,
+ };
+ tester->resolver = aws_host_resolver_new_default(allocator, &resolver_options);
+
struct aws_mutex mutex = AWS_MUTEX_INIT;
struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT;
tester->mutex = mutex;
@@ -63,6 +78,7 @@
}
static int s_socket_common_tester_clean_up(struct socket_common_tester *tester) {
+ aws_host_resolver_release(tester->resolver);
aws_event_loop_group_release(tester->el_group);
aws_mutex_clean_up(&tester->mutex);
@@ -87,7 +103,7 @@
static bool s_channel_setup_predicate(void *user_data) {
struct socket_test_args *setup_test_args = (struct socket_test_args *)user_data;
- return aws_atomic_load_ptr(&setup_test_args->rw_slot) != NULL;
+ return setup_test_args->rw_slot != NULL;
}
static bool s_channel_shutdown_predicate(void *user_data) {
@@ -120,7 +136,7 @@
aws_channel_slot_insert_end(channel, rw_slot);
aws_channel_slot_set_handler(rw_slot, setup_test_args->rw_handler);
- aws_atomic_store_ptr(&setup_test_args->rw_slot, rw_slot);
+ setup_test_args->rw_slot = rw_slot;
aws_mutex_unlock(setup_test_args->mutex);
@@ -147,7 +163,7 @@
aws_channel_slot_insert_end(channel, rw_slot);
aws_channel_slot_set_handler(rw_slot, setup_test_args->rw_handler);
- aws_atomic_store_ptr(&setup_test_args->rw_slot, rw_slot);
+ setup_test_args->rw_slot = rw_slot;
}
aws_mutex_unlock(setup_test_args->mutex);
@@ -198,6 +214,7 @@
struct aws_byte_buf received_message;
size_t amount_read;
size_t expected_read;
+ size_t amount_written;
bool invocation_happened;
bool shutdown_finished;
};
@@ -224,8 +241,7 @@
struct socket_test_rw_args *rw_args = (struct socket_test_rw_args *)user_data;
aws_mutex_lock(rw_args->mutex);
- memcpy(rw_args->received_message.buffer + rw_args->received_message.len, data_read->buffer, data_read->len);
- rw_args->received_message.len += data_read->len;
+ AWS_FATAL_ASSERT(aws_byte_buf_write_from_whole_buffer(&rw_args->received_message, *data_read) == true);
rw_args->amount_read += data_read->len;
rw_args->invocation_happened = true;
aws_condition_variable_notify_one(rw_args->condition_variable);
@@ -234,6 +250,23 @@
return rw_args->received_message;
}
+void s_socket_test_handle_on_write_completed(
+ struct aws_channel *channel,
+ struct aws_io_message *message,
+ int error_code,
+ void *user_data) {
+
+ (void)channel;
+ AWS_FATAL_ASSERT(error_code == 0);
+ struct socket_test_rw_args *rw_args = (struct socket_test_rw_args *)user_data;
+
+ aws_mutex_lock(rw_args->mutex);
+ rw_args->amount_written += message->message_data.len;
+ rw_args->invocation_happened = true;
+ aws_condition_variable_notify_one(rw_args->condition_variable);
+ aws_mutex_unlock(rw_args->mutex);
+}
+
static struct aws_byte_buf s_socket_test_handle_write(
struct aws_channel_handler *handler,
struct aws_channel_slot *slot,
@@ -291,13 +324,27 @@
struct local_server_tester *tester,
struct socket_test_args *args,
struct socket_common_tester *s_c_tester,
+ enum aws_socket_domain socket_domain,
bool enable_back_pressure) {
+
AWS_ZERO_STRUCT(*tester);
tester->socket_options.connect_timeout_ms = 3000;
tester->socket_options.type = AWS_SOCKET_STREAM;
- tester->socket_options.domain = AWS_SOCKET_LOCAL;
-
- aws_socket_endpoint_init_local_address_for_test(&tester->endpoint);
+ tester->socket_options.domain = socket_domain;
+ switch (socket_domain) {
+ case AWS_SOCKET_LOCAL:
+ aws_socket_endpoint_init_local_address_for_test(&tester->endpoint);
+ break;
+ case AWS_SOCKET_IPV4:
+ strncpy(tester->endpoint.address, "127.0.0.1", sizeof(tester->endpoint.address));
+ break;
+ case AWS_SOCKET_IPV6:
+ strncpy(tester->endpoint.address, "::1", sizeof(tester->endpoint.address));
+ break;
+ default:
+ ASSERT_TRUE(false);
+ break;
+ }
tester->server_bootstrap = aws_server_bootstrap_new(allocator, s_c_tester->el_group);
ASSERT_NOT_NULL(tester->server_bootstrap);
@@ -316,6 +363,9 @@
tester->listener = aws_server_bootstrap_new_socket_listener(&bootstrap_options);
ASSERT_NOT_NULL(tester->listener);
+ /* find out which port the socket is bound to */
+ ASSERT_SUCCESS(aws_socket_get_bound_address(tester->listener, &tester->endpoint));
+
return AWS_OP_SUCCESS;
}
@@ -344,11 +394,12 @@
ASSERT_SUCCESS(s_socket_test_args_init(&client_args, &c_tester, client_rw_handler));
struct local_server_tester local_server_tester;
- ASSERT_SUCCESS(s_local_server_tester_init(allocator, &local_server_tester, &server_args, &c_tester, true));
+ ASSERT_SUCCESS(
+ s_local_server_tester_init(allocator, &local_server_tester, &server_args, &c_tester, AWS_SOCKET_LOCAL, true));
struct aws_client_bootstrap_options client_bootstrap_options = {
.event_loop_group = c_tester.el_group,
- .host_resolver = NULL,
+ .host_resolver = c_tester.resolver,
};
struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &client_bootstrap_options);
ASSERT_NOT_NULL(client_bootstrap);
@@ -359,7 +410,7 @@
AWS_ZERO_STRUCT(client_channel_options);
client_channel_options.bootstrap = client_bootstrap;
client_channel_options.host_name = local_server_tester.endpoint.address;
- client_channel_options.port = 0;
+ client_channel_options.port = local_server_tester.endpoint.port;
client_channel_options.socket_options = &local_server_tester.socket_options;
client_channel_options.setup_callback = s_socket_handler_test_client_setup_callback;
client_channel_options.shutdown_callback = s_socket_handler_test_client_shutdown_callback;
@@ -371,10 +422,10 @@
ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex));
/* wait for both ends to setup */
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_pinned_channel_setup_predicate, &server_args));
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_pinned_channel_setup_predicate, &client_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_pinned_channel_setup_predicate, &server_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_pinned_channel_setup_predicate, &client_args));
/* Verify the client channel was placed on the requested event loop */
ASSERT_PTR_EQUALS(pinned_event_loop, aws_channel_get_event_loop(client_args.channel));
@@ -382,13 +433,13 @@
ASSERT_SUCCESS(aws_channel_shutdown(server_args.channel, AWS_OP_SUCCESS));
ASSERT_SUCCESS(aws_channel_shutdown(client_args.channel, AWS_OP_SUCCESS));
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &server_args));
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &client_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_shutdown_predicate, &server_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_shutdown_predicate, &client_args));
aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener);
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_listener_destroy_predicate, &server_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_listener_destroy_predicate, &server_args));
aws_mutex_unlock(&c_tester.mutex);
@@ -450,15 +501,9 @@
s_socket_common_tester_init(allocator, &c_tester);
- struct aws_host_resolver_default_options resolver_options = {
- .el_group = c_tester.el_group,
- .max_entries = 8,
- };
- struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options);
-
struct aws_client_bootstrap_options client_bootstrap_options = {
.event_loop_group = c_tester.el_group,
- .host_resolver = resolver,
+ .host_resolver = c_tester.resolver,
};
struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &client_bootstrap_options);
ASSERT_NOT_NULL(client_bootstrap);
@@ -487,8 +532,8 @@
ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&client_channel_options));
ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex));
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_dns_failure_channel_setup_predicate, &c_tester));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_dns_failure_channel_setup_predicate, &c_tester));
/* Verify the setup callback failure was on the requested event loop */
ASSERT_TRUE(c_tester.setup_error_code != 0);
@@ -496,7 +541,6 @@
aws_mutex_unlock(&c_tester.mutex);
aws_client_bootstrap_release(client_bootstrap);
- aws_host_resolver_release(resolver);
ASSERT_SUCCESS(s_socket_common_tester_clean_up(&c_tester));
return AWS_OP_SUCCESS;
@@ -556,11 +600,12 @@
ASSERT_SUCCESS(s_socket_test_args_init(&client_args, &c_tester, client_rw_handler));
struct local_server_tester local_server_tester;
- ASSERT_SUCCESS(s_local_server_tester_init(allocator, &local_server_tester, &server_args, &c_tester, true));
+ ASSERT_SUCCESS(
+ s_local_server_tester_init(allocator, &local_server_tester, &server_args, &c_tester, AWS_SOCKET_LOCAL, true));
struct aws_client_bootstrap_options client_bootstrap_options = {
.event_loop_group = c_tester.el_group,
- .host_resolver = NULL,
+ .host_resolver = c_tester.resolver,
};
struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &client_bootstrap_options);
ASSERT_NOT_NULL(client_bootstrap);
@@ -569,7 +614,7 @@
AWS_ZERO_STRUCT(client_channel_options);
client_channel_options.bootstrap = client_bootstrap;
client_channel_options.host_name = local_server_tester.endpoint.address;
- client_channel_options.port = 0;
+ client_channel_options.port = local_server_tester.endpoint.port;
client_channel_options.socket_options = &local_server_tester.socket_options;
client_channel_options.setup_callback = s_socket_handler_test_client_setup_callback;
client_channel_options.shutdown_callback = s_socket_handler_test_client_shutdown_callback;
@@ -581,20 +626,20 @@
ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex));
/* wait for both ends to setup */
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &server_args));
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &client_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_setup_predicate, &server_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_setup_predicate, &client_args));
/* send msg from client to server, and wait for some bytes to be received */
- rw_handler_write(client_args.rw_handler, aws_atomic_load_ptr(&client_args.rw_slot), &msg_from_client);
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_socket_test_read_predicate, &server_rw_args));
+ rw_handler_write(client_args.rw_handler, client_args.rw_slot, &msg_from_client);
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_socket_test_read_predicate, &server_rw_args));
/* send msg from server to client, and wait for some bytes to be received */
- rw_handler_write(server_args.rw_handler, aws_atomic_load_ptr(&server_args.rw_slot), &msg_from_server);
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_socket_test_read_predicate, &client_rw_args));
+ rw_handler_write(server_args.rw_handler, server_args.rw_slot, &msg_from_server);
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_socket_test_read_predicate, &client_rw_args));
/* confirm that the initial read window was respected */
server_rw_args.invocation_happened = false;
@@ -604,13 +649,13 @@
ASSERT_INT_EQUALS(s_server_initial_read_window, server_rw_args.amount_read);
/* increment the read window on both sides and confirm they receive the remainder of their message */
- rw_handler_trigger_increment_read_window(server_args.rw_handler, aws_atomic_load_ptr(&server_args.rw_slot), 100);
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_socket_test_full_read_predicate, &server_rw_args));
-
- rw_handler_trigger_increment_read_window(client_args.rw_handler, aws_atomic_load_ptr(&client_args.rw_slot), 100);
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_socket_test_full_read_predicate, &client_rw_args));
+ rw_handler_trigger_increment_read_window(server_args.rw_handler, server_args.rw_slot, 100);
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_socket_test_full_read_predicate, &server_rw_args));
+
+ rw_handler_trigger_increment_read_window(client_args.rw_handler, client_args.rw_slot, 100);
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_socket_test_full_read_predicate, &client_rw_args));
ASSERT_INT_EQUALS(msg_from_server.len, client_rw_args.amount_read);
ASSERT_INT_EQUALS(msg_from_client.len, server_rw_args.amount_read);
@@ -630,13 +675,13 @@
ASSERT_SUCCESS(aws_channel_shutdown(server_args.channel, AWS_OP_SUCCESS));
ASSERT_SUCCESS(aws_channel_shutdown(client_args.channel, AWS_OP_SUCCESS));
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &server_args));
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &client_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_shutdown_predicate, &server_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_shutdown_predicate, &client_args));
aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener);
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_listener_destroy_predicate, &server_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_listener_destroy_predicate, &server_args));
aws_mutex_unlock(&c_tester.mutex);
@@ -687,11 +732,12 @@
ASSERT_SUCCESS(s_socket_test_args_init(&client_args, &c_tester, client_rw_handler));
struct local_server_tester local_server_tester;
- ASSERT_SUCCESS(s_local_server_tester_init(allocator, &local_server_tester, &server_args, &c_tester, false));
+ ASSERT_SUCCESS(
+ s_local_server_tester_init(allocator, &local_server_tester, &server_args, &c_tester, AWS_SOCKET_LOCAL, false));
struct aws_client_bootstrap_options client_bootstrap_options = {
.event_loop_group = c_tester.el_group,
- .host_resolver = NULL,
+ .host_resolver = c_tester.resolver,
};
struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &client_bootstrap_options);
ASSERT_NOT_NULL(client_bootstrap);
@@ -700,7 +746,7 @@
AWS_ZERO_STRUCT(client_channel_options);
client_channel_options.bootstrap = client_bootstrap;
client_channel_options.host_name = local_server_tester.endpoint.address;
- client_channel_options.port = 0;
+ client_channel_options.port = local_server_tester.endpoint.port;
client_channel_options.socket_options = &local_server_tester.socket_options;
client_channel_options.setup_callback = s_socket_handler_test_client_setup_callback;
client_channel_options.shutdown_callback = s_socket_handler_test_client_shutdown_callback;
@@ -711,24 +757,24 @@
ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex));
/* wait for both ends to setup */
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &server_args));
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &client_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_setup_predicate, &server_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_setup_predicate, &client_args));
aws_channel_shutdown(server_args.channel, AWS_OP_SUCCESS);
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &server_args));
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &client_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_shutdown_predicate, &server_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_shutdown_predicate, &client_args));
ASSERT_INT_EQUALS(AWS_OP_SUCCESS, server_args.error_code);
ASSERT_TRUE(
AWS_IO_SOCKET_CLOSED == client_args.error_code || AWS_IO_SOCKET_NOT_CONNECTED == client_args.error_code);
aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener);
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_listener_destroy_predicate, &server_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_listener_destroy_predicate, &server_args));
aws_mutex_unlock(&c_tester.mutex);
@@ -743,6 +789,182 @@
AWS_TEST_CASE(socket_handler_close, s_socket_close_test)
+/* This is a regression test.
+ * Once upon a time, if the socket-handler received READABLE and HANGUP events simultaneously,
+ * it would read one last time from the socket before closing it. But one read may
+ * not be enough to get all remaining data. The correct thing to do is
+ * repeatedly read until the read() call itself reports EOF or an error.
+ *
+ * Anyway, this test establishes a connection between server and client.
+ * The server sends a big chunk of data, and closes the socket immediately
+ * after the write completes. The client should still be able to read all the data. */
+static int s_socket_read_to_eof_after_peer_hangup_test_common(
+ struct aws_allocator *allocator,
+ void *ctx,
+ enum aws_socket_domain socket_domain) {
+
+ (void)ctx;
+ s_socket_common_tester_init(allocator, &c_tester);
+
+ const size_t total_bytes_to_send_from_server = g_aws_channel_max_fragment_size;
+
+ struct aws_byte_buf client_received_message;
+ ASSERT_SUCCESS(aws_byte_buf_init(&client_received_message, allocator, total_bytes_to_send_from_server));
+
+ struct aws_byte_buf msg_from_server;
+ ASSERT_SUCCESS(aws_byte_buf_init(&msg_from_server, allocator, total_bytes_to_send_from_server));
+
+ struct socket_test_rw_args server_rw_args;
+ ASSERT_SUCCESS(s_rw_args_init(&server_rw_args, &c_tester, aws_byte_buf_from_empty_array(NULL, 0), 0));
+
+ struct socket_test_rw_args client_rw_args;
+ ASSERT_SUCCESS(s_rw_args_init(&client_rw_args, &c_tester, client_received_message, 0));
+
+ /* NOTE: client starts with window=0, so we can VERY CAREFULLY control when it reads data from the socket */
+ struct aws_channel_handler *client_rw_handler = rw_handler_new(
+ allocator, s_socket_test_handle_read, s_socket_test_handle_write, true, 0 /*window*/, &client_rw_args);
+ ASSERT_NOT_NULL(client_rw_handler);
+
+ struct aws_channel_handler *server_rw_handler =
+ rw_handler_new(allocator, s_socket_test_handle_read, s_socket_test_handle_write, true, 10000, &server_rw_args);
+ ASSERT_NOT_NULL(server_rw_handler);
+
+ struct socket_test_args server_args;
+ ASSERT_SUCCESS(s_socket_test_args_init(&server_args, &c_tester, server_rw_handler));
+
+ struct socket_test_args client_args;
+ ASSERT_SUCCESS(s_socket_test_args_init(&client_args, &c_tester, client_rw_handler));
+
+ struct local_server_tester local_server_tester;
+ if (s_local_server_tester_init(allocator, &local_server_tester, &server_args, &c_tester, socket_domain, false)) {
+ /* Skip test if server can't bind to address (e.g. GitHub's ubuntu runners don't allow IPv6) */
+ if (aws_last_error() == AWS_IO_SOCKET_INVALID_ADDRESS) {
+ return AWS_OP_SKIP;
+ } else {
+ ASSERT_TRUE(false, "s_local_server_tester_init() failed");
+ }
+ }
+
+ struct aws_client_bootstrap_options client_bootstrap_options = {
+ .event_loop_group = c_tester.el_group,
+ .host_resolver = c_tester.resolver,
+ };
+ struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &client_bootstrap_options);
+ ASSERT_NOT_NULL(client_bootstrap);
+
+ struct aws_socket_channel_bootstrap_options client_channel_options = {
+ .bootstrap = client_bootstrap,
+ .host_name = local_server_tester.endpoint.address,
+ .port = local_server_tester.endpoint.port,
+ .socket_options = &local_server_tester.socket_options,
+ .setup_callback = s_socket_handler_test_client_setup_callback,
+ .shutdown_callback = s_socket_handler_test_client_shutdown_callback,
+ .user_data = &client_args,
+ .enable_read_back_pressure = true,
+ };
+
+ ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&client_channel_options));
+
+ ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex));
+
+ /* wait for both ends to setup */
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_setup_predicate, &server_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_setup_predicate, &client_args));
+
+ /* We want the server to send some data and hang up IMMEDIATELY after,
+ * before the client has fully read the data. This is tricky to do in a test.
+ *
+ * First, have the server send data... */
+ ASSERT_TRUE(aws_byte_buf_write_u8_n(&msg_from_server, 's', total_bytes_to_send_from_server));
+ rw_handler_write_with_callback(
+ server_rw_handler,
+ server_args.rw_slot,
+ &msg_from_server,
+ s_socket_test_handle_on_write_completed,
+ &server_rw_args);
+
+ /* ...now have the client open its read window and receive data in tiny chunks,
+ * stopping once the server has sent all data, but BEFORE the client has read all data.
+ * This is possible because the client's OS will buffer a certain amount of
+ * incoming data, before the client application calls read() on it. */
+ while (server_rw_args.amount_written < total_bytes_to_send_from_server) {
+ const size_t client_read_chunk_size = 128;
+ client_rw_args.expected_read += client_read_chunk_size;
+ rw_handler_trigger_increment_read_window(client_args.rw_handler, client_args.rw_slot, client_read_chunk_size);
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable,
+ &c_tester.mutex,
+ TIMEOUT,
+ s_socket_test_full_read_predicate,
+ &client_rw_args));
+ }
+
+ /* Now close the server's socket. */
+ ASSERT_SUCCESS(aws_channel_shutdown(server_args.channel, AWS_OP_SUCCESS));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_shutdown_predicate, &server_args));
+
+ /* Now sleep a moment to 100% guarantee the OS propagates the socket-close event to the client-side. */
+ aws_mutex_unlock(&c_tester.mutex);
+ aws_thread_current_sleep(NANOS_PER_SEC / 4);
+ aws_mutex_lock(&c_tester.mutex);
+
+ /* Ensure the client hasn't shut down before reading all the data. */
+ ASSERT_FALSE(client_args.shutdown_invoked, "Client should read all data before shutting down.");
+
+ /* Ensure the client hasn't read all data yet */
+ ASSERT_TRUE(
+ client_rw_args.amount_read < total_bytes_to_send_from_server,
+ "If this fails, then we're not truly reproducing the regression test."
+ " The server needs to finish sending data, and close the socket,"
+ " BEFORE the client reads all the data.");
+
+ /* Have the client open its window more-than-enough to receive the rest of the data.
+ * If the client socket closes before all the data is received, then we still have the bug. */
+ rw_handler_trigger_increment_read_window(
+ client_args.rw_handler, client_args.rw_slot, total_bytes_to_send_from_server * 3 /*more-than-enough*/);
+ client_rw_args.expected_read = total_bytes_to_send_from_server;
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_socket_test_full_read_predicate, &client_rw_args));
+
+ /* Wait for client to shutdown, due to the server having closed the socket */
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_shutdown_predicate, &client_args));
+
+ aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener);
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_listener_destroy_predicate, &server_args));
+
+ ASSERT_INT_EQUALS(AWS_IO_SOCKET_CLOSED, client_args.error_code);
+
+ aws_mutex_unlock(&c_tester.mutex);
+
+ /* clean up */
+ ASSERT_SUCCESS(s_local_server_tester_clean_up(&local_server_tester));
+ aws_byte_buf_clean_up(&client_received_message);
+ aws_byte_buf_clean_up(&msg_from_server);
+ aws_client_bootstrap_release(client_bootstrap);
+ ASSERT_SUCCESS(s_socket_common_tester_clean_up(&c_tester));
+
+ return AWS_OP_SUCCESS;
+}
+static int s_socket_read_to_eof_after_peer_hangup_test(struct aws_allocator *allocator, void *ctx) {
+ return s_socket_read_to_eof_after_peer_hangup_test_common(allocator, ctx, AWS_SOCKET_LOCAL);
+}
+AWS_TEST_CASE(socket_handler_read_to_eof_after_peer_hangup, s_socket_read_to_eof_after_peer_hangup_test)
+
+static int s_socket_ipv4_read_to_eof_after_peer_hangup_test(struct aws_allocator *allocator, void *ctx) {
+ return s_socket_read_to_eof_after_peer_hangup_test_common(allocator, ctx, AWS_SOCKET_IPV4);
+}
+AWS_TEST_CASE(socket_handler_ipv4_read_to_eof_after_peer_hangup, s_socket_ipv4_read_to_eof_after_peer_hangup_test)
+
+static int s_socket_ipv6_read_to_eof_after_peer_hangup_test(struct aws_allocator *allocator, void *ctx) {
+ return s_socket_read_to_eof_after_peer_hangup_test_common(allocator, ctx, AWS_SOCKET_IPV6);
+}
+AWS_TEST_CASE(socket_handler_ipv6_read_to_eof_after_peer_hangup, s_socket_ipv6_read_to_eof_after_peer_hangup_test)
+
static void s_creation_callback_test_channel_creation_callback(
struct aws_client_bootstrap *bootstrap,
int error_code,
@@ -843,12 +1065,13 @@
ASSERT_SUCCESS(s_socket_test_args_init(&client_args, &c_tester, client_rw_handler));
struct local_server_tester local_server_tester;
- ASSERT_SUCCESS(s_local_server_tester_init(allocator, &local_server_tester, &server_args, &c_tester, false));
+ ASSERT_SUCCESS(
+ s_local_server_tester_init(allocator, &local_server_tester, &server_args, &c_tester, AWS_SOCKET_LOCAL, false));
struct aws_client_bootstrap_options client_bootstrap_options;
AWS_ZERO_STRUCT(client_bootstrap_options);
client_bootstrap_options.event_loop_group = c_tester.el_group;
- client_bootstrap_options.host_resolver = NULL;
+ client_bootstrap_options.host_resolver = c_tester.resolver;
struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &client_bootstrap_options);
ASSERT_NOT_NULL(client_bootstrap);
@@ -857,7 +1080,7 @@
AWS_ZERO_STRUCT(client_channel_options);
client_channel_options.bootstrap = client_bootstrap;
client_channel_options.host_name = local_server_tester.endpoint.address;
- client_channel_options.port = 0;
+ client_channel_options.port = local_server_tester.endpoint.port;
client_channel_options.socket_options = &local_server_tester.socket_options;
client_channel_options.creation_callback = s_creation_callback_test_channel_creation_callback;
client_channel_options.setup_callback = s_socket_handler_test_client_setup_callback;
@@ -869,22 +1092,20 @@
ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex));
/* wait for both ends to setup */
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &server_args));
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &client_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_setup_predicate, &server_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_setup_predicate, &client_args));
ASSERT_TRUE(client_args.creation_callback_invoked);
- struct aws_channel_slot *client_rw_slot = aws_atomic_load_ptr(&client_args.rw_slot);
- rw_handler_write(client_args.rw_handler, client_rw_slot, &msg_from_client);
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_socket_test_read_predicate, &server_rw_args));
-
- struct aws_channel_slot *server_rw_slot = aws_atomic_load_ptr(&server_args.rw_slot);
- rw_handler_write(server_args.rw_handler, server_rw_slot, &msg_from_server);
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_socket_test_read_predicate, &client_rw_args));
+ rw_handler_write(client_args.rw_handler, client_args.rw_slot, &msg_from_client);
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_socket_test_read_predicate, &server_rw_args));
+
+ rw_handler_write(server_args.rw_handler, server_args.rw_slot, &msg_from_server);
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_socket_test_read_predicate, &client_rw_args));
uint64_t ms_to_ns = aws_timestamp_convert(1, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
@@ -894,8 +1115,8 @@
struct aws_statistics_handler_test_impl *stats_impl = stats_handler->impl;
aws_mutex_lock(&stats_impl->lock);
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &stats_impl->signal, &stats_impl->lock, s_stats_processed_predicate, stats_handler));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &stats_impl->signal, &stats_impl->lock, TIMEOUT, s_stats_processed_predicate, stats_handler));
ASSERT_TRUE(stats_impl->total_bytes_read == msg_from_server.len);
ASSERT_TRUE(stats_impl->total_bytes_written == msg_from_client.len);
@@ -904,14 +1125,14 @@
aws_channel_shutdown(server_args.channel, AWS_OP_SUCCESS);
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &server_args));
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &client_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_shutdown_predicate, &server_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_channel_shutdown_predicate, &client_args));
aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener);
- ASSERT_SUCCESS(aws_condition_variable_wait_pred(
- &c_tester.condition_variable, &c_tester.mutex, s_listener_destroy_predicate, &server_args));
+ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred(
+ &c_tester.condition_variable, &c_tester.mutex, TIMEOUT, s_listener_destroy_predicate, &server_args));
aws_mutex_unlock(&c_tester.mutex);