author     Max Kellermann <max@duempel.org>    2011-09-16 08:54:47 +0200
committer  Max Kellermann <max@duempel.org>    2011-09-16 09:14:29 +0200
commit     b5139438939e866aefa70278fc8cddc03f4dd92f
tree       e649802a4274c4870154c064a63382e6bfb8a96f
parent     edffc566001b10fe4a50985f644a33c576bedf38
input/curl: wait some more before resuming the stream
Pausing and resuming after every little chunk adds a lot of overhead.
Add a lower threshold for resuming the stream.
Diffstat
-rw-r--r--  src/input/curl_input_plugin.c  |  7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/src/input/curl_input_plugin.c b/src/input/curl_input_plugin.c
index 9b0cfab2e..cbee2f7a3 100644
--- a/src/input/curl_input_plugin.c
+++ b/src/input/curl_input_plugin.c
@@ -52,6 +52,11 @@
 static const size_t CURL_MAX_BUFFERED = 512 * 1024;
 
 /**
+ * Resume the stream at this number of bytes after it has been paused.
+ */
+static const size_t CURL_RESUME_AT = 384 * 1024;
+
+/**
  * Buffers created by input_curl_writefunction().
  */
 struct buffer {
@@ -918,7 +923,7 @@ input_curl_read(struct input_stream *is, void *ptr, size_t size,
 	is->offset += (goffset)nbytes;
 
 #if LIBCURL_VERSION_NUM >= 0x071200
-	if (c->paused && curl_total_buffer_size(c) < CURL_MAX_BUFFERED) {
+	if (c->paused && curl_total_buffer_size(c) < CURL_RESUME_AT) {
 		g_mutex_unlock(c->mutex);
 		io_thread_call(input_curl_resume, c);
 		g_mutex_lock(c->mutex);
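
The change above gives the buffer a simple hysteresis: the transfer is paused once roughly CURL_MAX_BUFFERED (512 KiB) is buffered, but it is only resumed after the reader has drained the buffer below CURL_RESUME_AT (384 KiB), instead of unpausing as soon as any space frees up. The following standalone C program is a minimal sketch of that idea, not the plugin's actual code; the names stream_state, should_pause, should_resume and the demo in main are made up for illustration, and only the two threshold values are taken from the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative thresholds mirroring the values used in the patch. */
static const size_t MAX_BUFFERED = 512 * 1024;  /* pause the transfer above this */
static const size_t RESUME_AT = 384 * 1024;     /* resume it only below this */

struct stream_state {
	size_t buffered;  /* bytes currently sitting in the buffer */
	bool paused;      /* is the transfer currently paused? */
};

/* Decide whether to pause after new data arrived from the network. */
static bool
should_pause(const struct stream_state *s)
{
	return !s->paused && s->buffered >= MAX_BUFFERED;
}

/* Decide whether to resume after the reader consumed some data. */
static bool
should_resume(const struct stream_state *s)
{
	return s->paused && s->buffered < RESUME_AT;
}

int
main(void)
{
	struct stream_state s = { .buffered = MAX_BUFFERED, .paused = false };

	if (should_pause(&s))
		s.paused = true;

	/* Consuming one small chunk is not enough to resume ... */
	s.buffered -= 16 * 1024;
	printf("after 16 KiB read: resume=%d\n", should_resume(&s));

	/* ... but once the buffer has drained below RESUME_AT it is. */
	s.buffered = RESUME_AT - 1;
	printf("after draining:    resume=%d\n", should_resume(&s));
	return 0;
}

With a single threshold, the first small read would immediately unpause the transfer, which would then hit the limit again after one more chunk arrives; the gap between the two thresholds is what lets each pause/resume cycle cover a substantial amount of data.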