Hello community,
here is the log from the commit of package librepo for openSUSE:Factory checked in at 2019-04-01 12:40:01
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/librepo (Old)
and /work/SRC/openSUSE:Factory/.librepo.new.25356 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "librepo"
Mon Apr 1 12:40:01 2019 rev:6 rq:690200 version:1.9.6
Changes:
--------
--- /work/SRC/openSUSE:Factory/librepo/librepo.changes 2019-02-20 14:10:13.586961533 +0100
+++ /work/SRC/openSUSE:Factory/.librepo.new.25356/librepo.changes 2019-04-01 12:40:01.521962976 +0200
@@ -1,0 +2,18 @@
+Sun Mar 31 18:26:16 UTC 2019 - Neal Gompa
+
+- Add patch to fix fetching zck-compressed repos (rh#1694411)
+ * Patch: librepo-PR148-clean-up-curl-target_handle.patch
+
+-------------------------------------------------------------------
+Sun Mar 31 15:04:35 UTC 2019 - Neal Gompa
+
+- Upgrade to 1.9.6
+ + Fix progress reporting with zchunk files
+ + Reduce download delays by using still_running correctly
+ + Improve error handling, cleanup
+ + Simplified lr_perform()'s loop to prevent busy wait
+ + Require libcurl >= 7.28.0
+- Drop patch included in this release
+ * Patch: PR138-Fix-progress-reporting-with-zchunk-files.patch
+
+-------------------------------------------------------------------
Old:
----
PR138-Fix-progress-reporting-with-zchunk-files.patch
librepo-1.9.3.tar.gz
New:
----
librepo-1.9.6.tar.gz
librepo-PR148-clean-up-curl-target_handle.patch
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ librepo.spec ++++++
--- /var/tmp/diff_new_pack.x37bT4/_old 2019-04-01 12:40:02.741963568 +0200
+++ /var/tmp/diff_new_pack.x37bT4/_new 2019-04-01 12:40:02.745963570 +0200
@@ -29,7 +29,7 @@
%define devname %{name}-devel
Name: librepo
-Version: 1.9.3
+Version: 1.9.6
Release: 0
Summary: Repodata downloading library
License: LGPL-2.0-or-later
@@ -39,15 +39,15 @@
Source0: %{url}/archive/%{version}/%{name}-%{version}.tar.gz
# Patches proposed upstream
-# From: https://github.com/rpm-software-management/librepo/pull/138
-Patch0101: PR138-Fix-progress-reporting-with-zchunk-files.patch
+## Clean up target->curl_handle rather than target->handle (rh#1694411)
+Patch0101: librepo-PR148-clean-up-curl-target_handle.patch
BuildRequires: cmake
BuildRequires: doxygen
BuildRequires: gpgme-devel
BuildRequires: pkgconfig(check)
BuildRequires: pkgconfig(glib-2.0) >= 2.26.0
-BuildRequires: pkgconfig(libcurl) >= 7.19.0
+BuildRequires: pkgconfig(libcurl) >= 7.28.0
BuildRequires: pkgconfig(libxml-2.0)
BuildRequires: pkgconfig(libcrypto)
BuildRequires: pkgconfig(openssl)
++++++ librepo-1.9.3.tar.gz -> librepo-1.9.6.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/librepo-1.9.3/VERSION.cmake new/librepo-1.9.6/VERSION.cmake
--- old/librepo-1.9.3/VERSION.cmake 2018-12-12 12:36:43.000000000 +0100
+++ new/librepo-1.9.6/VERSION.cmake 2019-03-27 12:26:10.000000000 +0100
@@ -1,3 +1,3 @@
SET(LIBREPO_MAJOR "1")
SET(LIBREPO_MINOR "9")
-SET(LIBREPO_PATCH "3")
+SET(LIBREPO_PATCH "6")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/librepo-1.9.3/librepo/cleanup.h new/librepo-1.9.6/librepo/cleanup.h
--- old/librepo-1.9.3/librepo/cleanup.h 2018-12-12 12:36:43.000000000 +0100
+++ new/librepo-1.9.6/librepo/cleanup.h 2019-03-27 12:26:10.000000000 +0100
@@ -31,7 +31,7 @@
G_BEGIN_DECLS
-static void
+static inline void
lr_close(int fildes)
{
if (fildes < 0)
@@ -40,23 +40,23 @@
}
#define LR_DEFINE_CLEANUP_FUNCTION(Type, name, func) \
- static inline void name (void *v) \
+ static inline void name (Type *v) \
{ \
- func (*(Type*)v); \
+ func (*v); \
}
#define LR_DEFINE_CLEANUP_FUNCTION0(Type, name, func) \
- static inline void name (void *v) \
+ static inline void name (Type *v) \
{ \
- if (*(Type*)v) \
- func (*(Type*)v); \
+ if (*v) \
+ func (*v); \
}
#define LR_DEFINE_CLEANUP_FUNCTIONt(Type, name, func) \
- static inline void name (void *v) \
+ static inline void name (Type *v) \
{ \
- if (*(Type*)v) \
- func (*(Type*)v, TRUE); \
+ if (*v) \
+ func (*v, TRUE); \
}
LR_DEFINE_CLEANUP_FUNCTION0(GArray*, lr_local_array_unref, g_array_unref)
@@ -76,16 +76,27 @@
LR_DEFINE_CLEANUP_FUNCTION(char**, lr_local_strfreev, g_strfreev)
LR_DEFINE_CLEANUP_FUNCTION(GList*, lr_local_free_list, g_list_free)
-LR_DEFINE_CLEANUP_FUNCTION(void*, lr_local_free, g_free)
-LR_DEFINE_CLEANUP_FUNCTION(int, lr_local_file_close, lr_close)
+LR_DEFINE_CLEANUP_FUNCTION(GSList*, lr_local_free_slist, g_slist_free)
+LR_DEFINE_CLEANUP_FUNCTION(int, lr_local_fd_close, lr_close)
+
+/*
+ * g_free() could be used for any pointer type.
+ * To avoid warnings, we must take void * in place of void **.
+ */
+static inline void
+lr_local_free(void *v)
+{
+ g_free(*(void **)v);
+}
#define _cleanup_dir_close_ __attribute__ ((cleanup(lr_local_dir_close)))
-#define _cleanup_file_close_ __attribute__ ((cleanup(lr_local_file_close)))
+#define _cleanup_fd_close_ __attribute__ ((cleanup(lr_local_fd_close)))
#define _cleanup_timer_destroy_ __attribute__ ((cleanup(lr_local_destroy_timer)))
#define _cleanup_free_ __attribute__ ((cleanup(lr_local_free)))
#define _cleanup_checksum_free_ __attribute__ ((cleanup(lr_local_checksum_free)))
#define _cleanup_error_free_ __attribute__ ((cleanup(lr_local_free_error)))
#define _cleanup_list_free_ __attribute__ ((cleanup(lr_local_free_list)))
+#define _cleanup_slist_free_ __attribute__ ((cleanup(lr_local_free_slist)))
#define _cleanup_string_free_ __attribute__ ((cleanup(lr_local_free_string)))
#define _cleanup_strv_free_ __attribute__ ((cleanup(lr_local_strfreev)))
#define _cleanup_array_unref_ __attribute__ ((cleanup(lr_local_array_unref)))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/librepo-1.9.3/librepo/downloader.c new/librepo-1.9.6/librepo/downloader.c
--- old/librepo-1.9.3/librepo/downloader.c 2018-12-12 12:36:43.000000000 +0100
+++ new/librepo-1.9.6/librepo/downloader.c 2019-03-27 12:26:10.000000000 +0100
@@ -414,6 +414,13 @@
if (!target->target->progresscb)
return ret;
+#ifdef WITH_ZCHUNK
+ if (target->target->is_zchunk) {
+ total_to_download = target->target->total_to_download;
+ now_downloaded = now_downloaded + target->target->downloaded;
+ }
+#endif /* WITH_ZCHUNK */
+
ret = target->target->progresscb(target->target->cbdata,
total_to_download,
now_downloaded);
@@ -1106,6 +1113,7 @@
target->target->zck_dl = zck_dl_init(zck);
}
target->target->range = zck_get_range(0, target->target->zck_header_size-1);
+ target->target->total_to_download = target->target->zck_header_size;
target->target->resume = 0;
target->zck_state = LR_ZCK_DL_HEADER;
return lr_zck_clear_header(target, err);
@@ -1174,6 +1182,11 @@
g_slist_free_full(filelist, free);
free(uf);
}
+ target->target->downloaded = target->target->total_to_download;
+ /* Calculate how many bytes need to be downloaded */
+ for(zckChunk *idx = zck_get_first_chunk(zck); idx != NULL; idx = zck_get_next_chunk(idx))
+ if(zck_get_chunk_valid(idx) != 1)
+ target->target->total_to_download += zck_get_chunk_comp_size(idx) + 92; /* Estimate of multipart overhead */
target->zck_state = LR_ZCK_DL_BODY;
return TRUE;
}
@@ -1299,20 +1312,68 @@
}
}
zck_reset_failed_chunks(zck);
+ /* Recalculate how many bytes remain to be downloaded by subtracting from total_to_download */
+ target->target->downloaded = target->target->total_to_download;
+ for(zckChunk *idx = zck_get_first_chunk(zck); idx != NULL; idx = zck_get_next_chunk(idx))
+ if(zck_get_chunk_valid(idx) != 1)
+ target->target->downloaded -= zck_get_chunk_comp_size(idx) + 92;
return prep_zck_body(target, err);
}
#endif /* WITH_ZCHUNK */
-/** Prepares next transfer
+/** Open the file to write to
+ */
+static FILE*
+open_target_file(LrTarget *target, GError **err)
+{
+ int fd;
+ FILE *f;
+
+ if (target->target->fd != -1) {
+ // Use supplied filedescriptor
+ fd = dup(target->target->fd);
+ if (fd == -1) {
+ g_set_error(err, LR_DOWNLOADER_ERROR, LRE_IO,
+ "dup(%d) failed: %s",
+ target->target->fd, g_strerror(errno));
+ return NULL;
+ }
+ } else {
+ // Use supplied filename
+ int open_flags = O_CREAT|O_TRUNC|O_RDWR;
+ if (target->resume || target->target->is_zchunk)
+ open_flags &= ~O_TRUNC;
+
+ fd = open(target->target->fn, open_flags, 0666);
+ if (fd == -1) {
+ g_set_error(err, LR_DOWNLOADER_ERROR, LRE_IO,
+ "Cannot open %s: %s",
+ target->target->fn, g_strerror(errno));
+ return NULL;
+ }
+ }
+
+ f = fdopen(fd, "w+b");
+ if (!f) {
+ close(fd);
+ g_set_error(err, LR_DOWNLOADER_ERROR, LRE_IO,
+ "fdopen(%d) failed: %s",
+ fd, g_strerror(errno));
+ return NULL;
+ }
+
+ return f;
+}
+
+/** Prepare next transfer
*/
static gboolean
prepare_next_transfer(LrDownload *dd, gboolean *candidatefound, GError **err)
{
LrTarget *target;
- char *full_url = NULL;
+ _cleanup_free_ char *full_url = NULL;
LrProtocol protocol = LR_PROTOCOL_OTHER;
gboolean ret;
- GError *tmp_err = NULL;
assert(dd);
assert(!err || *err == NULL);
@@ -1343,8 +1404,9 @@
// Something went wrong
g_set_error(err, LR_DOWNLOADER_ERROR, LRE_CURL,
"curl_easy_duphandle() call failed");
- return FALSE;
+ goto fail;
}
+ target->curl_handle = h;
// Set URL
c_rc = curl_easy_setopt(h, CURLOPT_URL, full_url);
@@ -1352,9 +1414,7 @@
g_set_error(err, LR_DOWNLOADER_ERROR, LRE_CURL,
"curl_easy_setopt(h, CURLOPT_URL, %s) failed: %s",
full_url, curl_easy_strerror(c_rc));
- lr_free(full_url);
- curl_easy_cleanup(h);
- return FALSE;
+ goto fail;
}
// Set error buffer
@@ -1362,83 +1422,49 @@
c_rc = curl_easy_setopt(h, CURLOPT_ERRORBUFFER, target->errorbuffer);
if (c_rc != CURLE_OK) {
g_set_error(err, LR_DOWNLOADER_ERROR, LRE_CURL,
- "curl_easy_setopt(h, CURLOPT_ERRORBUFFER, %s) failed: %s",
- full_url, curl_easy_strerror(c_rc));
- lr_free(full_url);
- curl_easy_cleanup(h);
- return FALSE;
+ "curl_easy_setopt(h, CURLOPT_ERRORBUFFER, target->errorbuffer) failed: %s",
+ curl_easy_strerror(c_rc));
+ goto fail;
}
- lr_free(full_url);
-
// Prepare FILE
- int fd;
-
- if (target->target->fd != -1) {
- // Use supplied filedescriptor
- fd = dup(target->target->fd);
- if (fd == -1) {
- g_set_error(err, LR_DOWNLOADER_ERROR, LRE_IO,
- "dup(%d) failed: %s",
- target->target->fd, g_strerror(errno));
- curl_easy_cleanup(h);
- return FALSE;
- }
- } else {
- // Use supplied filename
- int open_flags = O_CREAT|O_TRUNC|O_RDWR;
- if (target->resume || target->target->is_zchunk)
- open_flags &= ~O_TRUNC;
-
- fd = open(target->target->fn, open_flags, 0666);
- if (fd < 0) {
- g_set_error(err, LR_DOWNLOADER_ERROR, LRE_IO,
- "Cannot open %s: %s",
- target->target->fn, g_strerror(errno));
- curl_easy_cleanup(h);
- return FALSE;
- }
- }
-
- FILE *f = fdopen(fd, "w+b");
- if (!f) {
- g_set_error(err, LR_DOWNLOADER_ERROR, LRE_IO,
- "fdopen(%d) failed: %s",
- fd, g_strerror(errno));
- curl_easy_cleanup(h);
- return FALSE;
- }
-
- target->f = f;
+ target->f = open_target_file(target, err);
+ if (!target->f)
+ goto fail;
target->writecb_recieved = 0;
target->writecb_required_range_written = FALSE;
#ifdef WITH_ZCHUNK
// If file is zchunk, prep it
- if(target->target->is_zchunk && !check_zck(target, &tmp_err)) {
- g_set_error(err, LR_DOWNLOADER_ERROR, LRE_ZCK,
- "Unable to initialize zchunk file %s: %s",
- target->target->path,
- tmp_err->message);
- curl_easy_cleanup(h);
- return FALSE;
- }
+ if(target->target->is_zchunk) {
+ GError *tmp_err = NULL;
- // If zchunk is finished, we're done
- if(target->target->is_zchunk &&
- target->zck_state == LR_ZCK_DL_FINISHED) {
- g_debug("%s: Target already fully downloaded: %s", __func__, target->target->path);
- target->state = LR_DS_FINISHED;
- curl_easy_cleanup(h);
- g_free(target->headercb_interrupt_reason);
- target->headercb_interrupt_reason = NULL;
- fclose(target->f);
- target->f = NULL;
- lr_downloadtarget_set_error(target->target, LRE_OK, NULL);
- return TRUE;
+ if (!check_zck(target, &tmp_err)) {
+ g_set_error(err, LR_DOWNLOADER_ERROR, LRE_ZCK,
+ "Unable to initialize zchunk file %s: %s",
+ target->target->path,
+ tmp_err->message);
+ goto fail;
+ }
+
+ // If zchunk is finished, we're done
+ if(target->zck_state == LR_ZCK_DL_FINISHED) {
+ g_debug("%s: Target already fully downloaded: %s", __func__, target->target->path);
+ target->state = LR_DS_FINISHED;
+ curl_easy_cleanup(target->handle);
+ target->handle = NULL;
+ g_free(target->headercb_interrupt_reason);
+ target->headercb_interrupt_reason = NULL;
+ fclose(target->f);
+ target->f = NULL;
+ lr_downloadtarget_set_error(target->target, LRE_OK, NULL);
+ return TRUE;
+ }
}
# endif /* WITH_ZCHUNK */
+ int fd = fileno(target->f);
+
// Allow resume only for files that were originally being
// downloaded by librepo
if (target->resume && !has_librepo_xattr(fd)) {
@@ -1448,9 +1474,7 @@
if (ftruncate(fd, 0) == -1) {
g_set_error(err, LR_DOWNLOADER_ERROR, LRE_IO,
"ftruncate() failed: %s", g_strerror(errno));
- fclose(f);
- curl_easy_cleanup(h);
- return FALSE;
+ goto fail;
}
}
@@ -1466,8 +1490,8 @@
if (target->original_offset == -1) {
// Determine offset
- fseek(f, 0L, SEEK_END);
- gint64 determined_offset = ftell(f);
+ fseek(target->f, 0L, SEEK_END);
+ gint64 determined_offset = ftell(target->f);
if (determined_offset == -1) {
// An error while determining offset =>
// Download the whole file again
@@ -1536,17 +1560,20 @@
// Fill in headers specified by user in LrHandle via LRO_HTTPHEADER
for (int x=0; target->handle->httpheader[x]; x++) {
headers = curl_slist_append(headers, target->handle->httpheader[x]);
- assert(headers);
+ if (!headers)
+ lr_out_of_memory();
}
}
if (target->target->no_cache) {
// Add headers that tell proxy to serve us fresh data
headers = curl_slist_append(headers, "Cache-Control: no-cache");
headers = curl_slist_append(headers, "Pragma: no-cache");
- assert(headers);
+ if (!headers)
+ lr_out_of_memory();
}
target->curl_rqheaders = headers;
- curl_easy_setopt(h, CURLOPT_HTTPHEADER, headers);
+ c_rc = curl_easy_setopt(h, CURLOPT_HTTPHEADER, headers);
+ assert(c_rc == CURLE_OK);
// Add the new handle to the curl multi handle
CURLMcode cm_rc = curl_multi_add_handle(dd->multi_handle, h);
@@ -1568,13 +1595,23 @@
// Set protocol of the target
target->protocol = protocol;
- // Save curl handle for the current transfer
- target->curl_handle = h;
-
// Add the transfer to the list of running transfers
dd->running_transfers = g_slist_append(dd->running_transfers, target);
return TRUE;
+
+fail:
+ // Cleanup target
+ if (target->curl_handle) {
+ curl_easy_cleanup(target->curl_handle);
+ target->curl_handle = NULL;
+ }
+ if (target->f != NULL) {
+ fclose(target->f);
+ target->f = NULL;
+ }
+
+ return FALSE;
}
static gboolean
@@ -1842,12 +1879,13 @@
static gboolean
-check_finished_trasfer_checksum(int fd,
- GSList *checksums,
- gboolean *checksum_matches,
- GError **transfer_err,
- GError **err)
+check_finished_transfer_checksum(int fd,
+ GSList *checksums,
+ gboolean *checksum_matches,
+ GError **transfer_err,
+ GError **err)
{
+ gboolean ret = TRUE;
gboolean matches = TRUE;
GSList *calculated_chksums = NULL;
@@ -1860,15 +1898,15 @@
continue; // Bad checksum
lseek(fd, 0, SEEK_SET);
- gboolean ret = lr_checksum_fd_compare(chksum->type,
- fd,
- chksum->value,
- 1,
- &matches,
- &calculated,
- err);
+ ret = lr_checksum_fd_compare(chksum->type,
+ fd,
+ chksum->value,
+ 1,
+ &matches,
+ &calculated,
+ err);
if (!ret)
- return FALSE;
+ goto cleanup;
// Store calculated checksum
calculated_chksum = lr_downloadtargetchecksum_new(chksum->type,
@@ -1904,10 +1942,11 @@
"Calculated: %s Expected: %s", calculated, expected);
}
+cleanup:
g_slist_free_full(calculated_chksums,
(GDestroyNotify) lr_downloadtargetchecksum_free);
- return TRUE;
+ return ret;
}
@@ -2067,7 +2106,7 @@
while ((msg = curl_multi_info_read(dd->multi_handle, &msgs_in_queue))) {
LrTarget *target = NULL;
- char *effective_url = NULL;
+ _cleanup_free_ char *effective_url = NULL;
int fd;
gboolean matches = TRUE;
GError *transfer_err = NULL;
@@ -2153,7 +2192,7 @@
}
} else {
#endif /* WITH_ZCHUNK */
- ret = check_finished_trasfer_checksum(fd,
+ ret = check_finished_transfer_checksum(fd,
target->target->checksums,
&matches,
&transfer_err,
@@ -2361,8 +2400,6 @@
effective_url);
}
- lr_free(effective_url);
-
if (fail_fast_error) {
// Interrupt whole downloading
// A fatal error occurred or interrupted by callback
@@ -2381,43 +2418,41 @@
lr_perform(LrDownload *dd, GError **err)
{
CURLMcode cm_rc; // CurlM_ReturnCode
- int still_running;
assert(dd);
assert(!err || *err == NULL);
- do { // Before version 7.20.0 CURLM_CALL_MULTI_PERFORM can appear
+ while (1) {
+
+ int still_running = 0;
+
cm_rc = curl_multi_perform(dd->multi_handle, &still_running);
- } while (cm_rc == CURLM_CALL_MULTI_PERFORM);
- if (cm_rc != CURLM_OK) {
- g_set_error(err, LR_DOWNLOADER_ERROR, LRE_CURLM,
- "curl_multi_perform() error: %s",
- curl_multi_strerror(cm_rc));
- return FALSE;
- }
+ if (lr_interrupt) {
+ // Check interrupt after each call of curl_multi_perform
+ g_set_error(err, LR_DOWNLOADER_ERROR, LRE_INTERRUPTED,
+ "Interrupted by signal");
+ return FALSE;
+ }
- if (lr_interrupt) {
- // Check interrupt after each call of curl_multi_perform
- g_set_error(err, LR_DOWNLOADER_ERROR, LRE_INTERRUPTED,
- "Interrupted by signal");
- return FALSE;
- }
+ if (cm_rc != CURLM_OK) {
+ g_set_error(err, LR_DOWNLOADER_ERROR, LRE_CURLM,
+ "curl_multi_perform() error: %s",
+ curl_multi_strerror(cm_rc));
+ return FALSE;
+ }
- while (dd->running_transfers) {
- int rc;
- int maxfd = -1;
- long curl_timeout = -1;
- struct timeval timeout;
- fd_set fdread, fdwrite, fdexcep;
+ // Check if any handle finished and potentially add one or more
+ // waiting downloads to the multi_handle.
+ int rc = check_transfer_statuses(dd, err);
+ if (!rc)
+ return FALSE;
- FD_ZERO(&fdread);
- FD_ZERO(&fdwrite);
- FD_ZERO(&fdexcep);
-
- // Set suitable timeout to play around with
- timeout.tv_sec = 1;
- timeout.tv_usec = 0;
+ // Leave if there's nothing to wait for
+ if (!still_running && !dd->running_transfers)
+ break;
+
+ long curl_timeout = -1;
cm_rc = curl_multi_timeout(dd->multi_handle, &curl_timeout);
if (cm_rc != CURLM_OK) {
@@ -2427,82 +2462,22 @@
return FALSE;
}
- if (curl_timeout >= 0) {
- timeout.tv_sec = curl_timeout / 1000;
- if (timeout.tv_sec > 1)
- timeout.tv_sec = 1;
- else
- timeout.tv_usec = (curl_timeout % 1000) * 1000;
- }
- else if (!still_running) {
- // do not sleep for 1s if no more transfers are running
- timeout.tv_sec = 0;
- }
+ if (curl_timeout <= 0) // No wait
+ continue;
- // Get file descriptors from the transfers
- cm_rc = curl_multi_fdset(dd->multi_handle, &fdread, &fdwrite,
- &fdexcep, &maxfd);
+ if (curl_timeout > 1000) // Wait no more than 1s
+ curl_timeout = 1000;
+
+ cm_rc = curl_multi_wait(dd->multi_handle, NULL, 0, curl_timeout, NULL);
if (cm_rc != CURLM_OK) {
g_set_error(err, LR_DOWNLOADER_ERROR, LRE_CURLM,
- "curl_multi_fdset() error: %s",
+ "curl_multi_wait() error: %s",
curl_multi_strerror(cm_rc));
return FALSE;
}
-
- rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
- if (rc < 0) {
- if (errno == EINTR) {
- g_debug("%s: select() interrupted by signal", __func__);
- //goto retry;
- } else {
- g_set_error(err, LR_DOWNLOADER_ERROR, LRE_SELECT,
- "select() error: %s", g_strerror(errno));
- return FALSE;
- }
- }
-
- // This do-while loop is important. Because if curl_multi_perform sets
- // still_running to 0, we need to check if there are any next
- // transfers available (we need to call check_transfer_statuses).
- // Because if there will be no next transfers available and the
- // curl multi handle is empty (all transfers already
- // finished - this is what still_running == 0 means),
- // then the next iteration of main downloding loop cause a 1sec
- // waiting on the select() call.
- do {
- // Check if any handle finished and potentialy add one or more
- // waiting downloads to the multi_handle.
- rc = check_transfer_statuses(dd, err);
- if (!rc)
- return FALSE;
-
- if (lr_interrupt) {
- g_set_error(err, LR_DOWNLOADER_ERROR, LRE_INTERRUPTED,
- "Interrupted by signal");
- return FALSE;
- }
-
- // Do curl_multi_perform()
- do { // Before version 7.20.0 CURLM_CALL_MULTI_PERFORM can appear
- cm_rc = curl_multi_perform(dd->multi_handle, &still_running);
- if (lr_interrupt) {
- // Check interrupt after each call of curl_multi_perform
- g_set_error(err, LR_DOWNLOADER_ERROR, LRE_INTERRUPTED,
- "Interrupted by signal");
- return FALSE;
- }
- } while (cm_rc == CURLM_CALL_MULTI_PERFORM);
-
- if (cm_rc != CURLM_OK) {
- g_set_error(err, LR_DOWNLOADER_ERROR, LRE_CURLM,
- "curl_multi_perform() error: %s",
- curl_multi_strerror(cm_rc));
- return FALSE;
- }
- } while (still_running == 0 && dd->running_transfers);
}
- return check_transfer_statuses(dd, err);
+ return TRUE;
}
gboolean
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/librepo-1.9.3/librepo/downloadtarget.h new/librepo-1.9.6/librepo/downloadtarget.h
--- old/librepo-1.9.3/librepo/downloadtarget.h 2018-12-12 12:36:43.000000000 +0100
+++ new/librepo-1.9.6/librepo/downloadtarget.h 2019-03-27 12:26:10.000000000 +0100
@@ -152,6 +152,12 @@
gint64 zck_header_size; /*!<
Zchunk header size */
+
+ double total_to_download; /*!<
+ Total to download in zchunk file */
+
+ double downloaded; /*!<
+ Amount already downloaded in zchunk file */
#endif /* WITH_ZCHUNK */
} LrDownloadTarget;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/librepo-1.9.3/librepo/fastestmirror.c new/librepo-1.9.6/librepo/fastestmirror.c
--- old/librepo-1.9.3/librepo/fastestmirror.c 2018-12-12 12:36:43.000000000 +0100
+++ new/librepo-1.9.6/librepo/fastestmirror.c 2019-03-27 12:26:10.000000000 +0100
@@ -27,6 +27,7 @@
#include
#include "util.h"
+#include "cleanup.h"
#include "handle_internal.h"
#include "rcodes.h"
#include "fastestmirror.h"
@@ -248,7 +249,8 @@
// Gen cache content
GError *tmp_err = NULL;
- gchar *content = g_key_file_to_data(cache->keyfile, NULL, &tmp_err);
+ _cleanup_free_ gchar *content =
+ g_key_file_to_data(cache->keyfile, NULL, &tmp_err);
if (tmp_err) {
g_propagate_error(err, tmp_err);
return FALSE;
@@ -264,7 +266,6 @@
fputs(content, f);
fclose(f);
- g_free(content);
return TRUE;
}
@@ -418,7 +419,7 @@
int still_running;
gdouble elapsed_time = 0.0;
- GTimer *timer = g_timer_new();
+ _cleanup_timer_destroy_ GTimer *timer = g_timer_new();
g_timer_start(timer);
do {
@@ -465,6 +466,7 @@
g_set_error(err, LR_FASTESTMIRROR_ERROR, LRE_CURLM,
"curl_multi_fdset() error: %s",
curl_multi_strerror(cm_rc));
+ curl_multi_cleanup(multihandle);
return FALSE;
}
@@ -475,6 +477,7 @@
} else {
g_set_error(err, LR_FASTESTMIRROR_ERROR, LRE_SELECT,
"select() error: %s", g_strerror(errno));
+ curl_multi_cleanup(multihandle);
return FALSE;
}
}
@@ -486,8 +489,6 @@
} while(still_running && elapsed_time < length_of_measurement);
- g_timer_destroy(timer);
-
// Remove curl easy handles from multi handle
// and calculate plain_connect_time
for (GSList *elem = list; elem; elem = g_slist_next(elem)) {
@@ -723,7 +724,7 @@
if (!handles)
return TRUE;
- GTimer *timer = g_timer_new();
+ _cleanup_timer_destroy_ GTimer *timer = g_timer_new();
g_timer_start(timer);
LrHandle *main_handle = handles->data; // Network configuration for the
@@ -732,10 +733,11 @@
// Prepare list of hosts
gchar *fastestmirrorcache = main_handle->fastestmirrorcache;
- GHashTable *hosts_ht = g_hash_table_new_full(g_str_hash,
- g_str_equal,
- g_free,
- NULL);
+ _cleanup_hashtable_unref_ GHashTable *hosts_ht =
+ g_hash_table_new_full(g_str_hash,
+ g_str_equal,
+ g_free,
+ NULL);
for (GSList *ehandle = handles; ehandle; ehandle = g_slist_next(ehandle)) {
LrHandle *handle = ehandle->data;
@@ -761,20 +763,16 @@
}
}
- GList *tmp_list_of_urls = g_hash_table_get_keys(hosts_ht);
- GSList *list_of_urls = NULL;
+ _cleanup_list_free_ GList *tmp_list_of_urls = g_hash_table_get_keys(hosts_ht);
+ _cleanup_slist_free_ GSList *list_of_urls = NULL;
int number_of_mirrors = 0;
for (GList *elem = tmp_list_of_urls; elem; elem = g_list_next(elem)) {
list_of_urls = g_slist_prepend(list_of_urls, elem->data);
number_of_mirrors++;
}
- g_list_free(tmp_list_of_urls);
if (number_of_mirrors <= 1) {
// Nothing to do
- g_slist_free(list_of_urls);
- g_hash_table_destroy(hosts_ht);
- g_timer_destroy(timer);
return TRUE;
}
@@ -784,9 +782,6 @@
err);
if (!ret) {
g_debug("%s: lr_fastestmirror failed", __func__);
- g_slist_free(list_of_urls);
- g_hash_table_destroy(hosts_ht);
- g_timer_destroy(timer);
return FALSE;
}
@@ -825,12 +820,8 @@
handle->internal_mirrorlist = g_slist_reverse(new_list);
}
- g_slist_free(list_of_urls);
- g_hash_table_destroy(hosts_ht);
-
g_timer_stop(timer);
g_debug("%s: Duration: %f", __func__, g_timer_elapsed(timer, NULL));
- g_timer_destroy(timer);
return TRUE;
}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/librepo-1.9.3/librepo/handle.c new/librepo-1.9.6/librepo/handle.c
--- old/librepo-1.9.3/librepo/handle.c 2018-12-12 12:36:43.000000000 +0100
+++ new/librepo-1.9.6/librepo/handle.c 2019-03-27 12:26:10.000000000 +0100
@@ -796,7 +796,9 @@
return TRUE;
} else if (localpath && !handle->mirrorlisturl) {
// Just try to use mirrorlist of the local repository
- gchar *path = lr_pathconcat(localpath, "mirrorlist", NULL);
+ _cleanup_free_ gchar *path;
+ path = lr_pathconcat(localpath, "mirrorlist", NULL);
+
if (g_file_test(path, G_FILE_TEST_IS_REGULAR)) {
g_debug("%s: Local mirrorlist found at %s", __func__, path);
fd = open(path, O_RDONLY);
@@ -804,13 +806,10 @@
g_set_error(err, LR_HANDLE_ERROR, LRE_IO,
"Cannot open %s: %s",
path, g_strerror(errno));
- g_free(path);
return FALSE;
}
- g_free(path);
} else {
// No local mirrorlist
- g_free(path);
return TRUE;
}
} else if (!handle->mirrorlisturl) {
@@ -912,7 +911,9 @@
return TRUE;
} else if (localpath && !handle->metalinkurl) {
// Just try to use metalink of the local repository
- gchar *path = lr_pathconcat(localpath, "metalink.xml", NULL);
+ _cleanup_free_ gchar *path;
+ path = lr_pathconcat(localpath, "metalink.xml", NULL);
+
if (g_file_test(path, G_FILE_TEST_IS_REGULAR)) {
g_debug("%s: Local metalink.xml found at %s", __func__, path);
fd = open(path, O_RDONLY);
@@ -920,13 +921,10 @@
g_set_error(err, LR_HANDLE_ERROR, LRE_IO,
"Cannot open %s: %s",
path, g_strerror(errno));
- g_free(path);
return FALSE;
}
- g_free(path);
} else {
// No local metalink
- g_free(path);
return TRUE;
}
} else if (!handle->metalinkurl) {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/librepo-1.9.3/librepo/package_downloader.c new/librepo-1.9.6/librepo/package_downloader.c
--- old/librepo-1.9.3/librepo/package_downloader.c 2018-12-12 12:36:43.000000000 +0100
+++ new/librepo-1.9.6/librepo/package_downloader.c 2019-03-27 12:26:10.000000000 +0100
@@ -30,6 +30,7 @@
#include
#include "types.h"
+#include "cleanup.h"
#include "util.h"
#include "package_downloader.h"
#include "handle_internal.h"
@@ -241,7 +242,7 @@
// Prepare targets
for (GSList *elem = targets; elem; elem = g_slist_next(elem)) {
- gchar *local_path;
+ _cleanup_free_ gchar *local_path = NULL;
LrPackageTarget *packagetarget = elem->data;
LrDownloadTarget *downloadtarget;
gint64 realsize = -1;
@@ -254,11 +255,12 @@
if (packagetarget->dest) {
if (g_file_test(packagetarget->dest, G_FILE_TEST_IS_DIR)) {
// Dir specified
- gchar *file_basename = g_path_get_basename(packagetarget->relative_url);
+ _cleanup_free_ gchar *file_basename;
+ file_basename = g_path_get_basename(packagetarget->relative_url);
+
local_path = g_build_filename(packagetarget->dest,
file_basename,
NULL);
- g_free(file_basename);
} else {
local_path = g_strdup(packagetarget->dest);
}
@@ -269,7 +271,6 @@
packagetarget->local_path = g_string_chunk_insert(packagetarget->chunk,
local_path);
- g_free(local_path);
// Check expected size and real size if the file exists
if (doresume
@@ -437,7 +438,7 @@
if (err && *err != NULL)
g_clear_error(err);
g_set_error(err, LR_PACKAGE_DOWNLOADER_ERROR, LRE_INTERRUPTED,
- "Insterupted by a SIGINT signal");
+ "Interrupted by a SIGINT signal");
return FALSE;
}
}
@@ -534,18 +535,19 @@
}
for (GSList *elem = targets; elem; elem = g_slist_next(elem)) {
- gchar *local_path;
+ _cleanup_free_ gchar *local_path = NULL;
LrPackageTarget *packagetarget = elem->data;
// Prepare destination filename
if (packagetarget->dest) {
if (g_file_test(packagetarget->dest, G_FILE_TEST_IS_DIR)) {
// Dir specified
- gchar *file_basename = g_path_get_basename(packagetarget->relative_url);
+ _cleanup_free_ gchar *file_basename;
+ file_basename = g_path_get_basename(packagetarget->relative_url);
+
local_path = g_build_filename(packagetarget->dest,
file_basename,
NULL);
- g_free(file_basename);
} else {
local_path = g_strdup(packagetarget->dest);
}
@@ -620,7 +622,7 @@
if (err && *err != NULL)
g_clear_error(err);
g_set_error(err, LR_PACKAGE_DOWNLOADER_ERROR, LRE_INTERRUPTED,
- "Insterupted by a SIGINT signal");
+ "Interrupted by a SIGINT signal");
return FALSE;
}
}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/librepo-1.9.3/librepo/yum.c new/librepo-1.9.6/librepo/yum.c
--- old/librepo-1.9.3/librepo/yum.c 2018-12-12 12:36:43.000000000 +0100
+++ new/librepo-1.9.6/librepo/yum.c 2019-03-27 12:26:10.000000000 +0100
@@ -1103,7 +1103,7 @@
GError *tmp_err = NULL;
_cleanup_free_ gchar *path = NULL;
_cleanup_free_ gchar *sig = NULL;
- _cleanup_file_close_ int fd = -1;
+ _cleanup_fd_close_ int fd = -1;
if (handle->mirrorlist_fd != -1) {
// Locate mirrorlist if available.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/librepo-1.9.3/librepo.spec new/librepo-1.9.6/librepo.spec
--- old/librepo-1.9.3/librepo.spec 2018-12-12 12:36:43.000000000 +0100
+++ new/librepo-1.9.6/librepo.spec 2019-03-27 12:26:10.000000000 +0100
@@ -1,11 +1,13 @@
+%global libcurl_version 7.28.0
+
%if 0%{?rhel} && 0%{?rhel} <= 7
# Do not build bindings for python3 for RHEL <= 7
%bcond_with python3
# python-flask is not in RHEL7
-%bcond_with tests
+%bcond_with pythontests
%else
%bcond_without python3
-%bcond_without tests
+%bcond_without pythontests
%endif
%if 0%{?rhel} > 7 || 0%{?fedora} > 29
@@ -24,7 +26,7 @@
%global dnf_conflict 2.8.8
Name: librepo
-Version: 1.9.3
+Version: 1.9.6
Release: 1%{?dist}
Summary: Repodata downloading library
@@ -39,13 +41,14 @@
BuildRequires: pkgconfig(glib-2.0)
BuildRequires: gpgme-devel
BuildRequires: libattr-devel
-BuildRequires: libcurl-devel >= 7.19.0
+BuildRequires: libcurl-devel >= %{libcurl_version}
BuildRequires: pkgconfig(libxml-2.0)
BuildRequires: pkgconfig(libcrypto)
BuildRequires: pkgconfig(openssl)
%if %{with zchunk}
BuildRequires: pkgconfig(zck) >= 0.9.11
%endif
+Requires: libcurl%{?_isa} >= %{libcurl_version}
%description
A library providing C and Python (libcURL like) API to downloading repository
@@ -64,21 +67,21 @@
%{?python_provide:%python_provide python2-%{name}}
%if 0%{?rhel} && 0%{?rhel} <= 7
BuildRequires: python-sphinx
-BuildRequires: pygpgme
%else
BuildRequires: python2-sphinx
-BuildRequires: python2-gpg
%endif
BuildRequires: python2-devel
-%if (0%{?rhel} && 0%{?rhel} <= 7) || (0%{?fedora} && 0%{?fedora} <= 27)
+%if %{with pythontests}
+BuildRequires: python2-flask
+BuildRequires: python2-nose
+%if (0%{?rhel} && 0%{?rhel} <= 7)
BuildRequires: pyxattr
+BuildRequires: pygpgme
%else
BuildRequires: python2-pyxattr
+BuildRequires: python2-gpg
%endif
-%if %{with tests}
-BuildRequires: python2-flask
-BuildRequires: python2-nose
-%endif
+%endif # with pythontests
Requires: %{name}%{?_isa} = %{version}-%{release}
Conflicts: python2-dnf < %{dnf_conflict}
@@ -90,14 +93,14 @@
%package -n python3-%{name}
Summary: Python 3 bindings for the librepo library
%{?python_provide:%python_provide python3-%{name}}
-BuildRequires: python3-gpg
BuildRequires: python3-devel
-%if %{with tests}
+%if %{with pythontests}
+BuildRequires: python3-gpg
BuildRequires: python3-flask
BuildRequires: python3-nose
+BuildRequires: python3-pyxattr
%endif
BuildRequires: python3-sphinx
-BuildRequires: python3-pyxattr
Requires: %{name}%{?_isa} = %{version}-%{release}
# Obsoletes Fedora 27 package
Obsoletes: platform-python-%{name} < %{version}-%{release}
@@ -116,19 +119,18 @@
%build
%if %{with python2}
pushd build-py2
- %cmake -DPYTHON_DESIRED:FILEPATH=%{__python2} %{!?with_zchunk:-DWITH_ZCHUNK=OFF} ..
+ %cmake -DPYTHON_DESIRED:FILEPATH=%{__python2} %{!?with_zchunk:-DWITH_ZCHUNK=OFF} -DENABLE_PYTHON_TESTS=%{?with_pythontests:ON}%{!?with_pythontests:OFF} ..
%make_build
popd
%endif
%if %{with python3}
pushd build-py3
- %cmake -DPYTHON_DESIRED:FILEPATH=%{__python3} %{!?with_zchunk:-DWITH_ZCHUNK=OFF} ..
+ %cmake -DPYTHON_DESIRED:FILEPATH=%{__python3} %{!?with_zchunk:-DWITH_ZCHUNK=OFF} -DENABLE_PYTHON_TESTS=%{?with_pythontests:ON}%{!?with_pythontests:OFF} ..
%make_build
popd
%endif
-%if %{with tests}
%check
%if %{with python2}
pushd build-py2
@@ -143,7 +145,6 @@
make ARGS="-V" test
popd
%endif
-%endif
%install
%if %{with python2}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/librepo-1.9.3/tests/CMakeLists.txt new/librepo-1.9.6/tests/CMakeLists.txt
--- old/librepo-1.9.3/tests/CMakeLists.txt 2018-12-12 12:36:43.000000000 +0100
+++ new/librepo-1.9.6/tests/CMakeLists.txt 2019-03-27 12:26:10.000000000 +0100
@@ -30,4 +30,6 @@
ADD_TEST(test_main run_tests.sh)
-ADD_SUBDIRECTORY (python)
+IF (ENABLE_PYTHON_TESTS)
+ ADD_SUBDIRECTORY (python)
+ENDIF()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/librepo-1.9.3/tests/python/tests/test_yum_package_downloading.py new/librepo-1.9.6/tests/python/tests/test_yum_package_downloading.py
--- old/librepo-1.9.3/tests/python/tests/test_yum_package_downloading.py 2018-12-12 12:36:43.000000000 +0100
+++ new/librepo-1.9.6/tests/python/tests/test_yum_package_downloading.py 2019-03-27 12:26:10.000000000 +0100
@@ -718,11 +718,6 @@
self.assertFalse(os.path.isfile(pkgs[1].local_path))
def test_download_packages_with_resume_02(self):
- return # This test causes issues on Fedora rawhide (F26)
- # Flask/werkzeug/SocketServer/socket fails on Broken pipe
- # and causes next test (test_download_packages_without_handle)
- # to always fail
-
# If download that should be resumed fails,
# the original file should not be modified or deleted
h = librepo.Handle()
@@ -733,25 +728,14 @@
fn = os.path.join(self.tmpdir, "package.rpm")
- # Download first 10 bytes of the package
- pkgs = []
- pkgs.append(librepo.PackageTarget(config.PACKAGE_01_01,
- handle=h,
- dest=fn,
- resume=False,
- byterangeend=9))
-
- librepo.download_packages(pkgs)
- pkg = pkgs[0]
- self.assertTrue(pkg.err is None)
- self.assertTrue(os.path.isfile(pkg.local_path))
- self.assertEqual(os.path.getsize(pkg.local_path), 10)
- fchksum = hashlib.md5(open(pkg.local_path, "rb").read()).hexdigest()
+ # Make fake unfinished download
+ CONTENT = "0123456789"
+ open(fn, "w").write(CONTENT)
# Mark the file as it was downloaded by Librepo
# Otherwise librepo refuse to resume
try:
- xattr.setxattr(pkg.local_path,
+ xattr.setxattr(fn,
"user.Librepo.DownloadInProgress".encode("utf-8"),
"".encode("utf-8"))
except IOError as err:
@@ -776,9 +760,7 @@
pkg = pkgs[0]
self.assertTrue(pkg.err)
self.assertTrue(os.path.isfile(pkg.local_path))
- self.assertEqual(os.path.getsize(pkg.local_path), 10)
- fchksum_new = hashlib.md5(open(pkg.local_path, "rb").read()).hexdigest()
- self.assertEqual(fchksum, fchksum_new)
+ self.assertEqual(open(pkg.local_path, "rb").read(), CONTENT.encode('utf-8'))
def test_download_packages_mirror_penalization_01(self):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/librepo-1.9.3/tests/test_repoconf.c new/librepo-1.9.6/tests/test_repoconf.c
--- old/librepo-1.9.3/tests/test_repoconf.c 2018-12-12 12:36:43.000000000 +0100
+++ new/librepo-1.9.6/tests/test_repoconf.c 2019-03-27 12:26:10.000000000 +0100
@@ -509,7 +509,7 @@
START_TEST(test_write_repoconf)
{
- _cleanup_file_close_ int rc = -1;
+ _cleanup_fd_close_ int rc = -1;
gboolean ret;
LrYumRepoConfs *confs;
LrYumRepoConf * conf;
++++++ librepo-PR148-clean-up-curl-target_handle.patch ++++++
From e7d7a50ef8a6c2ac1c3492a15fffaab68546a49f Mon Sep 17 00:00:00 2001
From: Jonathan Dieter
Date: Sun, 31 Mar 2019 18:04:32 +0100
Subject: [PATCH] Clean up target->curl_handle rather than target->handle
Cleaning up target->handle causes a segfault when this codepath is run, so
this patch fixes it so we clean up target->curl_handle
Signed-off-by: Jonathan Dieter
---
librepo/downloader.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/librepo/downloader.c b/librepo/downloader.c
index d7e2d70..9650f07 100644
--- a/librepo/downloader.c
+++ b/librepo/downloader.c
@@ -1451,8 +1451,8 @@ prepare_next_transfer(LrDownload *dd, gboolean *candidatefound, GError **err)
if(target->zck_state == LR_ZCK_DL_FINISHED) {
g_debug("%s: Target already fully downloaded: %s", __func__, target->target->path);
target->state = LR_DS_FINISHED;
- curl_easy_cleanup(target->handle);
- target->handle = NULL;
+ curl_easy_cleanup(target->curl_handle);
+ target->curl_handle = NULL;
g_free(target->headercb_interrupt_reason);
target->headercb_interrupt_reason = NULL;
fclose(target->f);