Extend FileArchive to import archive entries individually

This is needed primarily as a workaround for #7971. When importing a template
that was exported by Ardour 5 on macOS, we need to fix the paths of the
archive entries.

Later we can use this functionality also to handle imported templates if
templates with the same name already exist.

This commit only adds methods and members to FileArchive; it does not modify
any existing code, in order to keep the risk of regressions low. This, however,
leads to some duplicated code. Eventually we should consolidate this a bit.
This commit is contained in:
Johannes Mueller 2020-04-05 15:37:04 +02:00
parent 419af82645
commit c74cc2675e
2 changed files with 104 additions and 0 deletions

View File

@ -153,6 +153,8 @@ setup_archive ()
FileArchive::FileArchive (const std::string& url)
: _req (url)
, _current_entry (0)
, _archive (0)
{
if (!_req.url) {
fprintf (stderr, "Invalid Archive URL/filename\n");
@ -166,6 +168,14 @@ FileArchive::FileArchive (const std::string& url)
}
}
FileArchive::~FileArchive ()
{
	/* Release the libarchive read handle if one was ever opened
	 * (lazily created by next_file_name / setup_file_archive).
	 */
	if (!_archive) {
		return;
	}
	archive_read_close (_archive);
	archive_read_free (_archive);
}
int
FileArchive::inflate (const std::string& destdir)
{
@ -197,6 +207,73 @@ FileArchive::contents ()
}
}
std::string
FileArchive::next_file_name ()
{
	/* Advance to the next entry in the (local) archive and return its
	 * pathname. Returns an empty string on EOF or error, in which case
	 * _current_entry is reset so extract_current_file() becomes a no-op.
	 *
	 * The archive read-handle is created lazily on first call.
	 */
	assert (!_req.is_remote () && "FileArchive: Iterating over archive files not supported for remote archives.\n");

	if (!_archive) {
		_archive = setup_file_archive ();
		if (!_archive) {
			return std::string ();
		}
	}

	int r = archive_read_next_header (_archive, &_current_entry);

	if (!_req.mp.progress) {
		// file i/o -- not URL
		const uint64_t read = archive_filter_bytes (_archive, -1);
		progress (read, _req.mp.length);
	}

	if (r == ARCHIVE_EOF) {
		_current_entry = 0;
		return std::string ();
	}
	if (r != ARCHIVE_OK) {
		fprintf (stderr, "Error reading archive: %s\n", archive_error_string(_archive));
		_current_entry = 0;
		return std::string ();
	}

	/* archive_entry_pathname() may return NULL (e.g. for entries with
	 * unconvertible names); constructing std::string from NULL is UB,
	 * so treat it like an error.
	 */
	const char* pathname = archive_entry_pathname (_current_entry);
	if (!pathname) {
		_current_entry = 0;
		return std::string ();
	}
	return pathname;
}
int
FileArchive::extract_current_file (const std::string& destpath)
{
if (!_archive || !_current_entry) {
return 0;
}
int flags = ARCHIVE_EXTRACT_TIME;
struct archive *ext;
ext = archive_write_disk_new();
archive_write_disk_set_options(ext, flags);
archive_entry_set_pathname(_current_entry, destpath.c_str());
int r = archive_write_header(ext, _current_entry);
_current_entry = 0;
if (r != ARCHIVE_OK) {
fprintf (stderr, "Error reading archive: %s\n", archive_error_string(_archive));
return -1;
}
ar_copy_data (_archive, ext);
r = archive_write_finish_entry (ext);
if (r != ARCHIVE_OK) {
fprintf (stderr, "Error reading archive: %s\n", archive_error_string(_archive));
return -1;
}
return 0;
}
std::vector<std::string>
FileArchive::contents_file ()
{
@ -456,3 +533,21 @@ FileArchive::create (const std::map<std::string, std::string>& filemap, Compress
return 0;
}
struct archive*
FileArchive::setup_file_archive ()
{
struct archive* a = setup_archive ();
GStatBuf statbuf;
if (!g_stat (_req.url, &statbuf)) {
_req.mp.length = statbuf.st_size;
} else {
_req.mp.length = -1;
}
if (ARCHIVE_OK != archive_read_open_filename (a, _req.url, 8192)) {
fprintf (stderr, "Error opening archive: %s\n", archive_error_string(a));
return 0;
}
return a;
}

View File

@ -33,10 +33,14 @@ class LIBPBD_API FileArchive
{
public:
FileArchive (const std::string& url);
~FileArchive ();
int inflate (const std::string& destdir);
std::vector<std::string> contents ();
std::string next_file_name ();
int extract_current_file (const std::string& destpath);
/* these are mapped to libarchive's lzmaz
* compression level 0..9
*/
@ -147,8 +151,13 @@ class LIBPBD_API FileArchive
bool is_url ();
struct archive* setup_file_archive ();
Request _req;
pthread_t _tid;
struct archive_entry* _current_entry;
struct archive* _archive;
};
} /* namespace */