github.com/mono/boringssl.git
author    Håvard Molland <haavardm@opera.com>  2014-10-14 14:03:05 +0400
committer Adam Langley <agl@google.com>        2014-11-19 01:06:46 +0300
commit    ce5be4bd5ceaadb7bcebaf575e4dea74a4bff109 (patch)
tree      f5596a96de10261429c71fad7db98ca344efd3dd /crypto/bio
parent    5e4f6e92476144313b14dbadee25f1e288583d41 (diff)
Add zero copy read and write api for bio pairs.
Also add functionality for setting external buffers, to give the caller better control of the buffers. This is typically needed if OS sockets can outlive the bio pair.

Change-Id: I500f0c522011ce76e9a9bce5d7b43c93d9d11457
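
The sketch below is illustrative only and not part of the change: it shows how a caller might combine the external-buffer constructor with the zero-copy calls added here. It assumes the prototypes for BIO_new_bio_pair_external_buf and the BIO_zero_copy_* functions are visible from openssl/bio.h (the public-header part of the change is outside the crypto/bio diff shown on this page); the message contents and buffer sizes are made up for the example.

#include <stdint.h>
#include <string.h>

#include <openssl/bio.h>

/* Illustrative only: push one message through a BIO pair whose write buffers
 * are owned by the caller, so the memory can outlive the BIOs (for example
 * when it is shared with OS socket code). Error handling is kept minimal. */
static int zero_copy_pair_example(void) {
  static uint8_t buf1[4096], buf2[4096]; /* caller-owned ring buffers */
  static const char kMsg[] = "hello zero copy";
  BIO *bio1 = NULL, *bio2 = NULL;
  uint8_t *write_buf, *read_buf;
  size_t write_offset, read_offset, can_write, can_read, todo;
  int ok = 0;

  if (!BIO_new_bio_pair_external_buf(&bio1, sizeof(buf1), buf1, &bio2,
                                     sizeof(buf2), buf2)) {
    return 0;
  }

  /* Zero-copy write: borrow a contiguous region of bio1's write buffer,
   * fill it in place, then commit the number of bytes actually written. */
  if (!BIO_zero_copy_get_write_buf(bio1, &write_buf, &write_offset,
                                   &can_write)) {
    goto done;
  }
  todo = sizeof(kMsg) <= can_write ? sizeof(kMsg) : can_write;
  memcpy(write_buf + write_offset, kMsg, todo);
  if (!BIO_zero_copy_get_write_buf_done(bio1, todo)) {
    goto done;
  }

  /* Zero-copy read on the other end: look at the readable region of the
   * peer's buffer, consume it in place, then release it. */
  if (!BIO_zero_copy_get_read_buf(bio2, &read_buf, &read_offset, &can_read)) {
    goto done;
  }
  /* ... process can_read bytes starting at read_buf + read_offset ... */
  if (!BIO_zero_copy_get_read_buf_done(bio2, can_read)) {
    goto done;
  }
  ok = 1;

done:
  BIO_free(bio1);
  BIO_free(bio2);
  return ok;
}

The point of the external buffers is that buf1 and buf2 remain owned by the caller: BIO_free does not release them (see the buf_externally_allocated check in bio_free below), so they can safely be handed to socket code that outlives the pair.
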
Diffstat (limited to 'crypto/bio')
-rw-r--r--  crypto/bio/bio_error.c |   4
-rw-r--r--  crypto/bio/bio_test.c  | 154
-rw-r--r--  crypto/bio/pair.c      | 343
3 files changed, 485 insertions, 16 deletions
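
For orientation before the pair.c diff below: the pair's write buffer is a ring buffer whose write position is always (offset + len) % size, and the zero-copy calls expose the largest contiguous span on either side of the wrap-around point. A minimal standalone sketch of that arithmetic follows; the ring struct is a hypothetical stand-in for the bookkeeping fields of struct bio_bio_st, not code from the patch.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the buffer bookkeeping fields of
 * struct bio_bio_st (buf itself is omitted; only the arithmetic matters). */
struct ring {
  size_t len;    /* bytes currently stored */
  size_t offset; /* index of the oldest stored byte */
  size_t size;   /* capacity of the buffer */
};

/* Largest contiguous readable span starting at r->offset; mirrors
 * bio_zero_copy_get_read_buf. Stored data may wrap, so only the part up to
 * the physical end of the buffer is exposed in one call. */
static size_t readable_span(const struct ring *r) {
  if (r->len > r->size - r->offset) {
    return r->size - r->offset; /* wrapped: stop at the end of the buffer */
  }
  return r->len;
}

/* Largest contiguous writable span and its start index; mirrors
 * bio_zero_copy_get_write_buf. The write position is (offset + len) % size. */
static size_t writable_span(const struct ring *r, size_t *out_write_offset) {
  size_t write_offset = r->offset + r->len;
  if (write_offset >= r->size) {
    write_offset -= r->size;           /* wrapped around */
    *out_write_offset = write_offset;
    return r->offset - write_offset;   /* write up to the unread data */
  }
  *out_write_offset = write_offset;
  return r->size - write_offset;       /* write up to the end of the buffer */
}

int main(void) {
  struct ring r = {300, 300, 512}; /* len, offset, size */
  size_t write_offset;
  size_t can_write = writable_span(&r, &write_offset);
  printf("readable: %zu bytes at offset %zu\n", readable_span(&r), r.offset);
  printf("writable: %zu bytes at offset %zu\n", can_write, write_offset);
  return 0;
}

For example, with size = 512, offset = 300 and len = 300, the readable span is 212 bytes (offset 300 up to the end of the buffer) and the writable span is 212 bytes starting at offset 88. Draining the buffer normally resets offset to 0, but while a zero-copy write is in flight that reset must be skipped so that (offset + len) % size, and with it the borrowed write offset, stays put; that is what the zero_copy_write_lock checks in the diff enforce.
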
diff --git a/crypto/bio/bio_error.c b/crypto/bio/bio_error.c
index c67da280..09585e44 100644
--- a/crypto/bio/bio_error.c
+++ b/crypto/bio/bio_error.c
@@ -22,6 +22,10 @@ const ERR_STRING_DATA BIO_error_string_data[] = {
{ERR_PACK(ERR_LIB_BIO, BIO_F_BIO_new, 0), "BIO_new"},
{ERR_PACK(ERR_LIB_BIO, BIO_F_BIO_new_file, 0), "BIO_new_file"},
{ERR_PACK(ERR_LIB_BIO, BIO_F_BIO_new_mem_buf, 0), "BIO_new_mem_buf"},
+ {ERR_PACK(ERR_LIB_BIO, BIO_F_BIO_zero_copy_get_read_buf, 0), "BIO_zero_copy_get_read_buf"},
+ {ERR_PACK(ERR_LIB_BIO, BIO_F_BIO_zero_copy_get_read_buf_done, 0), "BIO_zero_copy_get_read_buf_done"},
+ {ERR_PACK(ERR_LIB_BIO, BIO_F_BIO_zero_copy_get_write_buf, 0), "BIO_zero_copy_get_write_buf"},
+ {ERR_PACK(ERR_LIB_BIO, BIO_F_BIO_zero_copy_get_write_buf_done, 0), "BIO_zero_copy_get_write_buf_done"},
{ERR_PACK(ERR_LIB_BIO, BIO_F_bio_ctrl, 0), "bio_ctrl"},
{ERR_PACK(ERR_LIB_BIO, BIO_F_bio_io, 0), "bio_io"},
{ERR_PACK(ERR_LIB_BIO, BIO_F_bio_ip_and_port_to_socket_and_addr, 0), "bio_ip_and_port_to_socket_and_addr"},
diff --git a/crypto/bio/bio_test.c b/crypto/bio/bio_test.c
index 86363f9c..39a79be9 100644
--- a/crypto/bio/bio_test.c
+++ b/crypto/bio/bio_test.c
@@ -35,6 +35,7 @@
#include <openssl/crypto.h>
#include <openssl/err.h>
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#if !defined(OPENSSL_WINDOWS)
static int closesocket(int sock) {
@@ -119,6 +120,155 @@ static int test_socket_connect(void) {
return 1;
}
+
+/* bio_read_zero_copy_wrapper is a wrapper around the zero-copy APIs to make
+ * testing easier. */
+static size_t bio_read_zero_copy_wrapper(BIO* bio, void* data, size_t len) {
+ uint8_t* read_buf;
+ size_t read_buf_offset;
+ size_t available_bytes;
+ size_t len_read = 0;
+
+ do {
+ if (!BIO_zero_copy_get_read_buf(bio, &read_buf, &read_buf_offset,
+ &available_bytes)) {
+ return 0;
+ }
+
+ available_bytes = MIN(available_bytes, len - len_read);
+ memmove(data + len_read, read_buf + read_buf_offset, available_bytes);
+
+ BIO_zero_copy_get_read_buf_done(bio, available_bytes);
+
+ len_read += available_bytes;
+ } while (len - len_read > 0 && available_bytes > 0);
+
+ return len_read;
+}
+
+/* bio_write_zero_copy_wrapper is a wrapper around the zero-copy APIs to make
+ * testing easier. */
+static size_t bio_write_zero_copy_wrapper(BIO* bio, const void* data,
+ size_t len) {
+ uint8_t* write_buf;
+ size_t write_buf_offset;
+ size_t available_bytes;
+ size_t len_written = 0;
+
+ do {
+ if (!BIO_zero_copy_get_write_buf(bio, &write_buf, &write_buf_offset,
+ &available_bytes)) {
+ return 0;
+ }
+
+ available_bytes = MIN(available_bytes, len - len_written);
+ memmove(write_buf + write_buf_offset, data + len_written, available_bytes);
+
+ BIO_zero_copy_get_write_buf_done(bio, available_bytes);
+
+ len_written += available_bytes;
+ } while (len - len_written > 0 && available_bytes > 0);
+
+ return len_written;
+}
+
+static int test_zero_copy_bio_pairs(void) {
+ /* Test read and write, especially triggering the ring buffer wrap-around.*/
+ BIO* bio1;
+ BIO* bio2;
+ size_t i, j;
+ uint8_t bio1_application_send_buffer[1024];
+ uint8_t bio2_application_recv_buffer[1024];
+ size_t total_read = 0;
+ size_t total_write = 0;
+ uint8_t* write_buf;
+ size_t write_buf_offset;
+ size_t available_bytes;
+ size_t bytes_left;
+
+ const size_t kLengths[] = {254, 255, 256, 257, 510, 511, 512, 513};
+
+ /* These trigger ring buffer wrap around. */
+ const size_t kPartialLengths[] = {0, 1, 2, 3, 128, 255, 256, 257, 511, 512};
+
+ static const size_t kBufferSize = 512;
+
+ srand(1);
+ for (i = 0; i < sizeof(bio1_application_send_buffer); i++) {
+ bio1_application_send_buffer[i] = rand() & 255;
+ }
+
+ /* Transfer bytes from bio1_application_send_buffer to
+ * bio2_application_recv_buffer in various ways. */
+ for (i = 0; i < sizeof(kLengths) / sizeof(kLengths[0]); i++) {
+ for (j = 0; j < sizeof(kPartialLengths) / sizeof(kPartialLengths[0]); j++) {
+ total_write = 0;
+ total_read = 0;
+
+ BIO_new_bio_pair(&bio1, kBufferSize, &bio2, kBufferSize);
+
+ total_write += bio_write_zero_copy_wrapper(
+ bio1, bio1_application_send_buffer, kLengths[i]);
+
+ /* This tests interleaved read/write calls. Do a read between zero copy
+ * write calls. */
+ if (!BIO_zero_copy_get_write_buf(bio1, &write_buf, &write_buf_offset,
+ &available_bytes)) {
+ return 0;
+ }
+
+ /* Free kPartialLengths[j] bytes in the beginning of bio1 write buffer.
+ * This enables ring buffer wrap around for the next write. */
+ total_read += BIO_read(bio2, bio2_application_recv_buffer + total_read,
+ kPartialLengths[j]);
+
+ size_t interleaved_write_len = MIN(kPartialLengths[j], available_bytes);
+
+ /* Write the data for the interleaved write call. If the buffer becomes
+ * empty after a read, the write offset is normally set to 0. Check that
+ * this does not happen for interleaved read/write and that
+ * |write_buf_offset| is still valid. */
+ memcpy(write_buf + write_buf_offset,
+ bio1_application_send_buffer + total_write, interleaved_write_len);
+ if (BIO_zero_copy_get_write_buf_done(bio1, interleaved_write_len)) {
+ total_write += interleaved_write_len;
+ }
+
+ /* Do another write in case |write_buf_offset| was wrapped */
+ total_write += bio_write_zero_copy_wrapper(
+ bio1, bio1_application_send_buffer + total_write,
+ kPartialLengths[j] - interleaved_write_len);
+
+ /* Drain the rest. */
+ bytes_left = BIO_pending(bio2);
+ total_read += bio_read_zero_copy_wrapper(
+ bio2, bio2_application_recv_buffer + total_read, bytes_left);
+
+ BIO_free(bio1);
+ BIO_free(bio2);
+
+ if (total_read != total_write) {
+ fprintf(stderr, "Lengths not equal in round (%u, %u)\n", (unsigned)i,
+ (unsigned)j);
+ return 0;
+ }
+ if (total_read > kLengths[i] + kPartialLengths[j]) {
+ fprintf(stderr, "Bad lengths in round (%u, %u)\n", (unsigned)i,
+ (unsigned)j);
+ return 0;
+ }
+ if (memcmp(bio1_application_send_buffer, bio2_application_recv_buffer,
+ total_read) != 0) {
+ fprintf(stderr, "Buffers not equal in round (%u, %u)\n", (unsigned)i,
+ (unsigned)j);
+ return 0;
+ }
+ }
+ }
+
+ return 1;
+}
+
static int test_printf(void) {
/* Test a short output, a very long one, and various sizes around
* 256 (the size of the buffer) to ensure edge cases are correct. */
@@ -201,6 +351,10 @@ int main(void) {
return 1;
}
+ if (!test_zero_copy_bio_pairs()) {
+ return 1;
+ }
+
printf("PASS\n");
return 0;
}
diff --git a/crypto/bio/pair.c b/crypto/bio/pair.c
index a2cc6d65..14a0bab2 100644
--- a/crypto/bio/pair.c
+++ b/crypto/bio/pair.c
@@ -70,7 +70,13 @@ struct bio_bio_st {
size_t len; /* valid iff buf != NULL; 0 if peer == NULL */
size_t offset; /* valid iff buf != NULL; 0 if len == 0 */
size_t size;
- char *buf; /* "size" elements (if != NULL) */
+ uint8_t *buf; /* "size" elements (if != NULL) */
+ char buf_externally_allocated; /* true iff buf was externally allocated. */
+
+ char zero_copy_read_lock; /* true iff a zero copy read operation
+ * is in progress. */
+ char zero_copy_write_lock; /* true iff a zero copy write operation
+ * is in progress. */
size_t request; /* valid iff peer != NULL; 0 if len != 0,
* otherwise set by peer to number of bytes
@@ -140,7 +146,7 @@ static int bio_free(BIO *bio) {
bio_destroy_pair(bio);
}
- if (b->buf != NULL) {
+ if (b->buf != NULL && !b->buf_externally_allocated) {
OPENSSL_free(b->buf);
}
@@ -149,6 +155,269 @@ static int bio_free(BIO *bio) {
return 1;
}
+static size_t bio_zero_copy_get_read_buf(struct bio_bio_st* peer_b,
+ uint8_t** out_read_buf,
+ size_t* out_buf_offset) {
+ size_t max_available;
+ if (peer_b->len > peer_b->size - peer_b->offset) {
+ /* Only the first half of the ring buffer can be read. */
+ max_available = peer_b->size - peer_b->offset;
+ } else {
+ max_available = peer_b->len;
+ }
+
+ *out_read_buf = peer_b->buf;
+ *out_buf_offset = peer_b->offset;
+ return max_available;
+}
+
+int BIO_zero_copy_get_read_buf(BIO* bio, uint8_t** out_read_buf,
+ size_t* out_buf_offset,
+ size_t* out_available_bytes) {
+ struct bio_bio_st* b;
+ struct bio_bio_st* peer_b;
+ size_t max_available;
+ *out_available_bytes = 0;
+
+ BIO_clear_retry_flags(bio);
+
+ if (!bio->init) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf, BIO_R_UNINITIALIZED);
+ return 0;
+ }
+
+ b = bio->ptr;
+
+ if (!b || !b->peer) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf,
+ BIO_R_UNSUPPORTED_METHOD);
+ return 0;
+ }
+
+ peer_b = b->peer->ptr;
+ if (!peer_b || !peer_b->peer || peer_b->peer->ptr != b) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf,
+ BIO_R_UNSUPPORTED_METHOD);
+ return 0;
+ }
+
+ if (peer_b->zero_copy_read_lock) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf, BIO_R_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ peer_b->request = 0; /* Is not used by zero-copy API. */
+
+ max_available =
+ bio_zero_copy_get_read_buf(peer_b, out_read_buf, out_buf_offset);
+
+ assert(peer_b->buf != NULL);
+ if (max_available > 0) {
+ peer_b->zero_copy_read_lock = 1;
+ }
+
+ *out_available_bytes = max_available;
+ return 1;
+}
+
+int BIO_zero_copy_get_read_buf_done(BIO* bio, size_t bytes_read) {
+ struct bio_bio_st* b;
+ struct bio_bio_st* peer_b;
+ size_t max_available;
+ size_t dummy_read_offset;
+ uint8_t* dummy_read_buf;
+
+ assert(BIO_get_retry_flags(bio) == 0);
+
+ if (!bio->init) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf_done,
+ BIO_R_UNINITIALIZED);
+ return 0;
+ }
+
+ b = bio->ptr;
+
+ if (!b || !b->peer) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf_done,
+ BIO_R_UNSUPPORTED_METHOD);
+ return 0;
+ }
+
+ peer_b = b->peer->ptr;
+ if (!peer_b || !peer_b->peer || peer_b->peer->ptr != b) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf_done,
+ BIO_R_UNSUPPORTED_METHOD);
+ return 0;
+ }
+
+ if (!peer_b->zero_copy_read_lock) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf_done,
+ BIO_R_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ max_available =
+ bio_zero_copy_get_read_buf(peer_b, &dummy_read_buf, &dummy_read_offset);
+ if (bytes_read > max_available) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf_done,
+ BIO_R_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ peer_b->len -= bytes_read;
+ assert(peer_b->len >= 0);
+ assert(peer_b->offset + bytes_read <= peer_b->size);
+
+ /* Move read offset. If zero_copy_write_lock == 1 we must advance the
+ * offset even if buffer becomes empty, to make sure
+ * write_offset = (offset + len) mod size does not change. */
+ if (peer_b->offset + bytes_read == peer_b->size ||
+ (!peer_b->zero_copy_write_lock && peer_b->len == 0)) {
+ peer_b->offset = 0;
+ } else {
+ peer_b->offset += bytes_read;
+ }
+
+ bio->num_read += bytes_read;
+ peer_b->zero_copy_read_lock = 0;
+ return 1;
+}
+
+static size_t bio_zero_copy_get_write_buf(struct bio_bio_st* b,
+ uint8_t** out_write_buf,
+ size_t* out_buf_offset) {
+ size_t write_offset;
+ size_t max_available;
+
+ assert(b->len <= b->size);
+
+ write_offset = b->offset + b->len;
+
+ if (write_offset >= b->size) {
+ /* Only the first half of the ring buffer can be written to. */
+ write_offset -= b->size;
+ /* write up to the start of the ring buffer. */
+ max_available = b->offset - write_offset;
+ } else {
+ /* write up to the end of the buffer. */
+ max_available = b->size - write_offset;
+ }
+
+ *out_write_buf = b->buf;
+ *out_buf_offset = write_offset;
+ return max_available;
+}
+
+int BIO_zero_copy_get_write_buf(BIO* bio, uint8_t** out_write_buf,
+ size_t* out_buf_offset,
+ size_t* out_available_bytes) {
+ struct bio_bio_st* b;
+ struct bio_bio_st* peer_b;
+ size_t max_available;
+
+ *out_available_bytes = 0;
+ BIO_clear_retry_flags(bio);
+
+ if (!bio->init) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf, BIO_R_UNINITIALIZED);
+ return 0;
+ }
+
+ b = bio->ptr;
+
+ if (!b || !b->buf || !b->peer) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf,
+ BIO_R_UNSUPPORTED_METHOD);
+ return 0;
+ }
+ peer_b = b->peer->ptr;
+ if (!peer_b || !peer_b->peer || peer_b->peer->ptr != b) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf,
+ BIO_R_UNSUPPORTED_METHOD);
+ return 0;
+ }
+
+ assert(b->buf != NULL);
+
+ if (b->zero_copy_write_lock) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf, BIO_R_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ b->request = 0;
+ if (b->closed) {
+ /* Bio is already closed. */
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf, BIO_R_BROKEN_PIPE);
+ return 0;
+ }
+
+ max_available = bio_zero_copy_get_write_buf(b, out_write_buf, out_buf_offset);
+
+ if (max_available > 0) {
+ b->zero_copy_write_lock = 1;
+ }
+
+ *out_available_bytes = max_available;
+ return 1;
+}
+
+int BIO_zero_copy_get_write_buf_done(BIO* bio, size_t bytes_written) {
+ struct bio_bio_st* b;
+ struct bio_bio_st* peer_b;
+
+ size_t rest;
+ size_t dummy_write_offset;
+ uint8_t* dummy_write_buf;
+
+ if (!bio->init) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf_done,
+ BIO_R_UNINITIALIZED);
+ return 0;
+ }
+
+ b = bio->ptr;
+
+ if (!b || !b->buf || !b->peer) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf_done,
+ BIO_R_UNSUPPORTED_METHOD);
+ return 0;
+ }
+ peer_b = b->peer->ptr;
+ if (!peer_b || !peer_b->peer || peer_b->peer->ptr != b) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf_done,
+ BIO_R_UNSUPPORTED_METHOD);
+ return 0;
+ }
+
+ b->request = 0;
+ if (b->closed) {
+ /* BIO is already closed. */
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf_done, BIO_R_BROKEN_PIPE);
+ return 0;
+ }
+
+ if (!b->zero_copy_write_lock) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf_done,
+ BIO_R_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ rest = bio_zero_copy_get_write_buf(b, &dummy_write_buf, &dummy_write_offset);
+
+ if (bytes_written > rest) {
+ OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf_done,
+ BIO_R_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ bio->num_write += bytes_written;
+ /* Move write offset. */
+ b->len += bytes_written;
+ b->zero_copy_write_lock = 0;
+ return 1;
+}
+
static int bio_read(BIO *bio, char *buf, int size_) {
size_t size = size_;
size_t rest;
@@ -169,7 +438,7 @@ static int bio_read(BIO *bio, char *buf, int size_) {
peer_b->request = 0; /* will be set in "retry_read" situation */
- if (buf == NULL || size == 0) {
+ if (buf == NULL || size == 0 || peer_b->zero_copy_read_lock) {
return 0;
}
@@ -214,7 +483,10 @@ static int bio_read(BIO *bio, char *buf, int size_) {
memcpy(buf, peer_b->buf + peer_b->offset, chunk);
peer_b->len -= chunk;
- if (peer_b->len) {
+ /* If zero_copy_write_lock == 1 we must advance the offset even if buffer
+ * becomes empty, to make sure write_offset = (offset + len) % size
+ * does not change. */
+ if (peer_b->len || peer_b->zero_copy_write_lock) {
peer_b->offset += chunk;
assert(peer_b->offset <= peer_b->size);
if (peer_b->offset == peer_b->size) {
@@ -248,6 +520,10 @@ static int bio_write(BIO *bio, const char *buf, int num_) {
assert(b->peer != NULL);
assert(b->buf != NULL);
+ if (b->zero_copy_write_lock) {
+ return 0;
+ }
+
b->request = 0;
if (b->closed) {
/* we already closed */
@@ -304,7 +580,8 @@ static int bio_write(BIO *bio, const char *buf, int num_) {
return num;
}
-static int bio_make_pair(BIO *bio1, BIO *bio2) {
+static int bio_make_pair(BIO* bio1, BIO* bio2, uint8_t* ext_writebuf1,
+ uint8_t* ext_writebuf2) {
struct bio_bio_st *b1, *b2;
assert(bio1 != NULL);
@@ -319,20 +596,32 @@ static int bio_make_pair(BIO *bio1, BIO *bio2) {
}
if (b1->buf == NULL) {
- b1->buf = OPENSSL_malloc(b1->size);
- if (b1->buf == NULL) {
- OPENSSL_PUT_ERROR(BIO, bio_make_pair, ERR_R_MALLOC_FAILURE);
- return 0;
+ if (!ext_writebuf1) {
+ b1->buf_externally_allocated = 0;
+ b1->buf = OPENSSL_malloc(b1->size);
+ if (b1->buf == NULL) {
+ OPENSSL_PUT_ERROR(BIO, bio_make_pair, ERR_R_MALLOC_FAILURE);
+ return 0;
+ }
+ } else {
+ b1->buf = ext_writebuf1;
+ b1->buf_externally_allocated = 1;
}
b1->len = 0;
b1->offset = 0;
}
if (b2->buf == NULL) {
- b2->buf = OPENSSL_malloc(b2->size);
- if (b2->buf == NULL) {
- OPENSSL_PUT_ERROR(BIO, bio_make_pair, ERR_R_MALLOC_FAILURE);
- return 0;
+ if (!ext_writebuf2) {
+ b2->buf_externally_allocated = 0;
+ b2->buf = OPENSSL_malloc(b2->size);
+ if (b2->buf == NULL) {
+ OPENSSL_PUT_ERROR(BIO, bio_make_pair, ERR_R_MALLOC_FAILURE);
+ return 0;
+ }
+ } else {
+ b2->buf = ext_writebuf2;
+ b2->buf_externally_allocated = 1;
}
b2->len = 0;
b2->offset = 0;
@@ -341,9 +630,13 @@ static int bio_make_pair(BIO *bio1, BIO *bio2) {
b1->peer = bio2;
b1->closed = 0;
b1->request = 0;
+ b1->zero_copy_read_lock = 0;
+ b1->zero_copy_write_lock = 0;
b2->peer = bio1;
b2->closed = 0;
b2->request = 0;
+ b2->zero_copy_read_lock = 0;
+ b2->zero_copy_write_lock = 0;
bio1->init = 1;
bio2->init = 1;
@@ -370,6 +663,11 @@ static long bio_ctrl(BIO *bio, int cmd, long num, void *ptr) {
} else {
size_t new_size = num;
+ /* Don't change the size of externally allocated buffers. */
+ if (b->buf && b->buf_externally_allocated) {
+ return 0;
+ }
+
if (b->size != new_size) {
if (b->buf) {
OPENSSL_free(b->buf);
@@ -478,12 +776,25 @@ static int bio_puts(BIO *bio, const char *str) {
return bio_write(bio, str, strlen(str));
}
-int BIO_new_bio_pair(BIO **bio1_p, size_t writebuf1, BIO **bio2_p,
- size_t writebuf2) {
+int BIO_new_bio_pair(BIO** bio1_p, size_t writebuf1,
+ BIO** bio2_p, size_t writebuf2) {
+ return BIO_new_bio_pair_external_buf(bio1_p, writebuf1, NULL, bio2_p,
+ writebuf2, NULL);
+}
+
+int BIO_new_bio_pair_external_buf(BIO** bio1_p, size_t writebuf1,
+ uint8_t* ext_writebuf1,
+ BIO** bio2_p, size_t writebuf2,
+ uint8_t* ext_writebuf2) {
BIO *bio1 = NULL, *bio2 = NULL;
long r;
int ret = 0;
+ /* External buffers must have sizes greater than 0. */
+ if ((ext_writebuf1 && !writebuf1) || (ext_writebuf2 && !writebuf2)) {
+ return 0;
+ }
+
bio1 = BIO_new(BIO_s_bio());
if (bio1 == NULL) {
goto err;
@@ -506,7 +817,7 @@ int BIO_new_bio_pair(BIO **bio1_p, size_t writebuf1, BIO **bio2_p,
}
}
- if (!bio_make_pair(bio1, bio2)) {
+ if (!bio_make_pair(bio1, bio2, ext_writebuf1, ext_writebuf2)) {
goto err;
}
ret = 1;