diff --git a/CHANGELOG.md b/CHANGELOG.md index bec6c1508e1..d9e9a1e2944 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,9 @@ Note that this project **does not** adhere to [Semantic Versioning](http://semve ### Changed - We improved the Latex2Unicode conversion [#8639](https://github.com/JabRef/jabref/pull/8639) +- Writing BibTeX data into a PDF (XMP) removes braces. [#8452](https://github.com/JabRef/jabref/issues/8452) +- Writing BibTeX data into a PDF (XMP) does not write the `file` field. +- Writing BibTeX data into a PDF (XMP) considers the configured keyword separator (and does not use "," as default any more) ### Fixed diff --git a/src/main/java/org/jabref/gui/exporter/WriteMetadataToPdfAction.java b/src/main/java/org/jabref/gui/exporter/WriteMetadataToPdfAction.java index 41d31ce292e..34d39b2c8d4 100644 --- a/src/main/java/org/jabref/gui/exporter/WriteMetadataToPdfAction.java +++ b/src/main/java/org/jabref/gui/exporter/WriteMetadataToPdfAction.java @@ -5,7 +5,6 @@ import java.util.Collection; import java.util.List; import java.util.Optional; -import java.util.stream.Collectors; import javafx.application.Platform; import javafx.geometry.Insets; @@ -129,8 +128,8 @@ private void writeMetadata() { .map(file -> file.findIn(stateManager.getActiveDatabase().get(), filePreferences)) .filter(Optional::isPresent) .map(Optional::get) - .filter(path -> FileUtil.isPDFFile(path)) - .collect(Collectors.toList()); + .filter(FileUtil::isPDFFile) + .toList(); Platform.runLater(() -> optionsDialog.getProgressArea() .appendText(entry.getCitationKey().orElse(Localization.lang("undefined")) + "\n")); @@ -144,13 +143,14 @@ private void writeMetadata() { if (Files.exists(file)) { try { writeMetadataToFile(file, entry, stateManager.getActiveDatabase().get(), database); - Platform.runLater( - () -> optionsDialog.getProgressArea().appendText(" " + Localization.lang("OK") + ".\n")); + Platform.runLater(() -> + optionsDialog.getProgressArea() + .appendText(" " + Localization.lang("OK") + ".\n")); entriesChanged++; } catch (Exception e) { Platform.runLater(() -> { - optionsDialog.getProgressArea().appendText(" " + Localization.lang("Error while writing") + " '" - + file.toString() + "':\n"); + optionsDialog.getProgressArea() + .appendText(" " + Localization.lang("Error while writing") + " '" + file + "':\n"); optionsDialog.getProgressArea().appendText(" " + e.getLocalizedMessage() + "\n"); }); errors++; @@ -160,23 +160,24 @@ private void writeMetadata() { Platform.runLater(() -> { optionsDialog.getProgressArea() .appendText(" " + Localization.lang("Skipped - PDF does not exist") + ":\n"); - optionsDialog.getProgressArea().appendText(" " + file.toString() + "\n"); + optionsDialog.getProgressArea() + .appendText(" " + file + "\n"); }); } } } if (optionsDialog.isCanceled()) { - Platform.runLater( - () -> optionsDialog.getProgressArea().appendText("\n" + Localization.lang("Operation canceled.") + "\n")); + Platform.runLater(() -> + optionsDialog.getProgressArea().appendText("\n" + Localization.lang("Operation canceled.") + "\n")); break; } } Platform.runLater(() -> { optionsDialog.getProgressArea() .appendText("\n" - + Localization.lang("Finished writing metadata for %0 file (%1 skipped, %2 errors).", String - .valueOf(entriesChanged), String.valueOf(skipped), String.valueOf(errors))); + + Localization.lang("Finished writing metadata for %0 file (%1 skipped, %2 errors).", + String.valueOf(entriesChanged), String.valueOf(skipped), String.valueOf(errors))); optionsDialog.done(); }); @@ -192,7 
+193,7 @@ private void writeMetadata() { * This writes both XMP data and embeds a corresponding .bib file */ synchronized private void writeMetadataToFile(Path file, BibEntry entry, BibDatabaseContext databaseContext, BibDatabase database) throws Exception { - XmpUtilWriter.writeXmp(file, entry, database, xmpPreferences); + new XmpUtilWriter(xmpPreferences).writeXmp(file, entry, database); EmbeddedBibFilePdfExporter embeddedBibExporter = new EmbeddedBibFilePdfExporter(databaseContext.getMode(), entryTypesManager, fieldWriterPreferences); embeddedBibExporter.exportToFileByPath(databaseContext, database, filePreferences, file); diff --git a/src/main/java/org/jabref/gui/fieldeditors/LinkedFileViewModel.java b/src/main/java/org/jabref/gui/fieldeditors/LinkedFileViewModel.java index ba5b41f394f..c2db24cd29f 100644 --- a/src/main/java/org/jabref/gui/fieldeditors/LinkedFileViewModel.java +++ b/src/main/java/org/jabref/gui/fieldeditors/LinkedFileViewModel.java @@ -426,7 +426,7 @@ public void writeMetadataToPdf() { synchronized (linkedFile) { try { // Similar code can be found at {@link org.jabref.gui.exporter.WriteMetadataToPdfAction.writeMetadataToFile} - XmpUtilWriter.writeXmp(file.get(), entry, databaseContext.getDatabase(), preferences.getXmpPreferences()); + new XmpUtilWriter(preferences.getXmpPreferences()).writeXmp(file.get(), entry, databaseContext.getDatabase()); EmbeddedBibFilePdfExporter embeddedBibExporter = new EmbeddedBibFilePdfExporter(databaseContext.getMode(), Globals.entryTypesManager, preferences.getFieldWriterPreferences()); embeddedBibExporter.exportToFileByPath(databaseContext, databaseContext.getDatabase(), preferences.getFilePreferences(), file.get()); @@ -462,7 +462,7 @@ public void download() { BackgroundTask downloadTask = prepareDownloadTask(targetDirectory.get(), urlDownload); downloadTask.onSuccess(destination -> { - boolean isDuplicate = false; + boolean isDuplicate; try { isDuplicate = FileNameUniqueness.isDuplicatedFile(targetDirectory.get(), destination.getFileName(), dialogService); } catch (IOException e) { diff --git a/src/main/java/org/jabref/logic/bibtex/comparator/EntryComparator.java b/src/main/java/org/jabref/logic/bibtex/comparator/EntryComparator.java index 3892d220251..7e11b1d8de0 100644 --- a/src/main/java/org/jabref/logic/bibtex/comparator/EntryComparator.java +++ b/src/main/java/org/jabref/logic/bibtex/comparator/EntryComparator.java @@ -66,7 +66,7 @@ public int compare(BibEntry e1, BibEntry e2) { } } - // If the field is author or editor, we rearrange names so they are + // If the field is author or editor, we rearrange names to achieve that they are // sorted according to last name. 
if (sortField.getProperties().contains(FieldProperty.PERSON_NAMES)) { if (f1 != null) { diff --git a/src/main/java/org/jabref/logic/exporter/XmpExporter.java b/src/main/java/org/jabref/logic/exporter/XmpExporter.java index 49dce5cbd3b..23730852ee0 100644 --- a/src/main/java/org/jabref/logic/exporter/XmpExporter.java +++ b/src/main/java/org/jabref/logic/exporter/XmpExporter.java @@ -1,8 +1,6 @@ package org.jabref.logic.exporter; -import java.io.BufferedWriter; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.util.Collections; @@ -57,7 +55,7 @@ public void export(BibDatabaseContext databaseContext, Path file, List if (file.getParent() == null) { entryFile = Path.of(suffix); } else { - entryFile = Path.of(file.getParent().toString() + "/" + suffix); + entryFile = Path.of(file.getParent() + "/" + suffix); } this.writeBibToXmp(entryFile, Collections.singletonList(entry)); } @@ -67,10 +65,7 @@ public void export(BibDatabaseContext databaseContext, Path file, List } private void writeBibToXmp(Path file, List entries) throws IOException { - String xmpContent = XmpUtilWriter.generateXmpStringWithoutXmpDeclaration(entries, this.xmpPreferences); - try (BufferedWriter writer = Files.newBufferedWriter(file, StandardCharsets.UTF_8)) { - writer.write(xmpContent); - writer.flush(); - } + String xmpContent = new XmpUtilWriter(this.xmpPreferences).generateXmpStringWithoutXmpDeclaration(entries); + Files.writeString(file, xmpContent); } } diff --git a/src/main/java/org/jabref/logic/exporter/XmpPdfExporter.java b/src/main/java/org/jabref/logic/exporter/XmpPdfExporter.java index 397ee70f1d8..f6ece8646c2 100644 --- a/src/main/java/org/jabref/logic/exporter/XmpPdfExporter.java +++ b/src/main/java/org/jabref/logic/exporter/XmpPdfExporter.java @@ -27,7 +27,7 @@ public void export(BibDatabaseContext databaseContext, Path pdfFile, List result = new ArrayList<>(1); - try (PDDocument document = XmpUtilReader.loadWithAutomaticDecryption(filePath)) { + try (PDDocument document = new XmpUtilReader().loadWithAutomaticDecryption(filePath)) { String firstPageContents = getFirstPageContents(document); Optional entry = getEntryFromPDFContent(firstPageContents, OS.NEWLINE); entry.ifPresent(result::add); diff --git a/src/main/java/org/jabref/logic/importer/fileformat/PdfEmbeddedBibFileImporter.java b/src/main/java/org/jabref/logic/importer/fileformat/PdfEmbeddedBibFileImporter.java index e1706187f43..73eb86a0789 100644 --- a/src/main/java/org/jabref/logic/importer/fileformat/PdfEmbeddedBibFileImporter.java +++ b/src/main/java/org/jabref/logic/importer/fileformat/PdfEmbeddedBibFileImporter.java @@ -64,7 +64,7 @@ public ParserResult importDatabase(String data) throws IOException { @Override public ParserResult importDatabase(Path filePath) { - try (PDDocument document = XmpUtilReader.loadWithAutomaticDecryption(filePath)) { + try (PDDocument document = new XmpUtilReader().loadWithAutomaticDecryption(filePath)) { return new ParserResult(getEmbeddedBibFileEntries(document)); } catch (EncryptedPdfsNotSupportedException e) { return ParserResult.fromErrorMessage(Localization.lang("Decryption not supported.")); diff --git a/src/main/java/org/jabref/logic/importer/fileformat/PdfVerbatimBibTextImporter.java b/src/main/java/org/jabref/logic/importer/fileformat/PdfVerbatimBibTextImporter.java index 76fa09af9bd..aa16875ecf2 100644 --- a/src/main/java/org/jabref/logic/importer/fileformat/PdfVerbatimBibTextImporter.java +++ 
b/src/main/java/org/jabref/logic/importer/fileformat/PdfVerbatimBibTextImporter.java @@ -56,7 +56,7 @@ public ParserResult importDatabase(String data) throws IOException { @Override public ParserResult importDatabase(Path filePath) { List result = new ArrayList<>(1); - try (PDDocument document = XmpUtilReader.loadWithAutomaticDecryption(filePath)) { + try (PDDocument document = new XmpUtilReader().loadWithAutomaticDecryption(filePath)) { String firstPageContents = getFirstPageContents(document); BibtexParser parser = new BibtexParser(importFormatPreferences, new DummyFileUpdateMonitor()); result = parser.parseEntries(firstPageContents); diff --git a/src/main/java/org/jabref/logic/importer/fileformat/PdfXmpImporter.java b/src/main/java/org/jabref/logic/importer/fileformat/PdfXmpImporter.java index 470d6756fc8..0b8fe47c3dc 100644 --- a/src/main/java/org/jabref/logic/importer/fileformat/PdfXmpImporter.java +++ b/src/main/java/org/jabref/logic/importer/fileformat/PdfXmpImporter.java @@ -54,7 +54,7 @@ public ParserResult importDatabase(String data) throws IOException { public ParserResult importDatabase(Path filePath) { Objects.requireNonNull(filePath); try { - return new ParserResult(XmpUtilReader.readXmp(filePath, xmpPreferences)); + return new ParserResult(new XmpUtilReader().readXmp(filePath, xmpPreferences)); } catch (IOException exception) { return ParserResult.fromError(exception); } diff --git a/src/main/java/org/jabref/logic/xmp/DublinCoreExtractor.java b/src/main/java/org/jabref/logic/xmp/DublinCoreExtractor.java index 06fa0081c05..dc70cc048b9 100644 --- a/src/main/java/org/jabref/logic/xmp/DublinCoreExtractor.java +++ b/src/main/java/org/jabref/logic/xmp/DublinCoreExtractor.java @@ -3,14 +3,15 @@ import java.util.Arrays; import java.util.Comparator; import java.util.List; -import java.util.Map.Entry; import java.util.Optional; -import java.util.Set; +import java.util.SortedSet; import java.util.TreeSet; import java.util.function.Consumer; import java.util.function.Predicate; import org.jabref.logic.TypedBibEntry; +import org.jabref.logic.formatter.casechanger.UnprotectTermsFormatter; +import org.jabref.model.database.BibDatabase; import org.jabref.model.database.BibDatabaseMode; import org.jabref.model.entry.Author; import org.jabref.model.entry.AuthorList; @@ -24,11 +25,15 @@ import org.jabref.model.entry.types.EntryTypeFactory; import org.jabref.model.strings.StringUtil; +import org.apache.pdfbox.pdmodel.PDDocument; import org.apache.xmpbox.schema.DublinCoreSchema; import org.apache.xmpbox.type.BadFieldValueException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +/** + * This class is used for both conversion from Dublin Core to BibTeX and conversion from BibTeX to Dublin Core + */ public class DublinCoreExtractor { public static final String DC_COVERAGE = "coverage"; @@ -42,6 +47,8 @@ public class DublinCoreExtractor { private final BibEntry bibEntry; + private final UnprotectTermsFormatter unprotectTermsFormatter = new UnprotectTermsFormatter(); + /** * @param dcSchema Metadata in DublinCore format. * @param resolvedEntry The BibEntry object, which is filled during metadata extraction.
@@ -49,12 +56,11 @@ public class DublinCoreExtractor { public DublinCoreExtractor(DublinCoreSchema dcSchema, XmpPreferences xmpPreferences, BibEntry resolvedEntry) { this.dcSchema = dcSchema; this.xmpPreferences = xmpPreferences; - this.bibEntry = resolvedEntry; } /** - * Editor in BibTex - Contributor in DublinCore + * Editor in BibTeX - Contributor in DublinCore */ private void extractEditor() { List contributors = dcSchema.getContributors(); @@ -64,7 +70,7 @@ private void extractEditor() { } /** - * Author in BibTex - Creator in DublinCore + * Author in BibTeX - Creator in DublinCore */ private void extractAuthor() { List creators = dcSchema.getCreators(); @@ -74,7 +80,7 @@ private void extractAuthor() { } /** - * Bibtex-Fields : year, [month], [day] - 'dc:date' in DublinCore + * BibTeX-Fields : year, [month], [day] - 'dc:date' in DublinCore */ private void extractDate() { List dates = dcSchema.getUnqualifiedSequenceValueList("date"); @@ -90,7 +96,7 @@ private void extractDate() { } /** - * Abstract in BibTex - Description in DublinCore + * Abstract in BibTeX - Description in DublinCore */ private void extractAbstract() { String description = null; @@ -105,7 +111,7 @@ private void extractAbstract() { } /** - * DOI in BibTex - Identifier in DublinCore + * DOI in BibTeX - Identifier in DublinCore */ private void extractDOI() { String identifier = dcSchema.getIdentifier(); @@ -115,7 +121,7 @@ private void extractDOI() { } /** - * Publisher are equivalent in both formats (BibTex and DublinCore) + * Publisher are equivalent in both formats (BibTeX and DublinCore) */ private void extractPublisher() { List publishers = dcSchema.getPublishers(); @@ -128,7 +134,7 @@ private void extractPublisher() { * This method sets all fields, which are custom in BibTeX and therefore supported by JabRef, but which are not * included in the DublinCore format. *

- * The relation attribute of DublinCore is abused to insert these custom fields. + * The relation attribute of DublinCore is abused to store these custom fields. The prefix bibtex is used. */ private void extractBibTexFields() { Predicate isBibTeXElement = s -> s.startsWith("bibtex/"); @@ -161,7 +167,7 @@ private void extractBibTexFields() { } /** - * Rights are equivalent in both formats (BibTex and DublinCore) + * Rights are equivalent in both formats (BibTeX and DublinCore) */ private void extractRights() { String rights = null; @@ -176,7 +182,7 @@ private void extractRights() { } /** - * Source is equivalent in both formats (BibTex and DublinCore) + * Source is equivalent in both formats (BibTeX and DublinCore) */ private void extractSource() { String source = dcSchema.getSource(); @@ -186,7 +192,7 @@ private void extractSource() { } /** - * Keywords in BibTex - Subjects in DublinCore + * Keywords in BibTeX - Subjects in DublinCore */ private void extractSubject() { List subjects = dcSchema.getSubjects(); @@ -196,7 +202,7 @@ private void extractSubject() { } /** - * Title is equivalent in both formats (BibTex and DublinCore) + * Title is equivalent in both formats (BibTeX and DublinCore) */ private void extractTitle() { String title = null; @@ -211,7 +217,8 @@ private void extractTitle() { } /** - * Type is equivalent in both formats (BibTex and DublinCore) + * Type is equivalent in both formats (BibTeX and DublinCore) + *

Opposite method: {@link DublinCoreExtractor#fillType()} */ private void extractType() { List types = dcSchema.getTypes(); @@ -224,7 +231,7 @@ private void extractType() { } /** - * No Equivalent in BibTex. Will create an Unknown "Coverage" Field + * No Equivalent in BibTeX. Will create an Unknown "Coverage" Field */ private void extractCoverage() { String coverage = dcSchema.getCoverage(); @@ -234,13 +241,13 @@ private void extractCoverage() { } /** - * Language is equivalent in both formats (BibTex and DublinCore) + * Language is equivalent in both formats (BibTeX and DublinCore) */ private void extractLanguages() { StringBuilder builder = new StringBuilder(); List languages = dcSchema.getLanguages(); - if (languages != null && !languages.isEmpty()) { + if ((languages != null) && !languages.isEmpty()) { languages.forEach(language -> builder.append(",").append(language)); bibEntry.setField(StandardField.LANGUAGE, builder.substring(1)); } @@ -254,6 +261,9 @@ private void extractLanguages() { * The BibEntry is build by mapping individual fields in the dublin core (like creator, title, subject) to fields in * a bibtex bibEntry. In case special "bibtex/" entries are contained, the normal dublin core fields take * precedence. For instance, the dublin core date takes precedence over bibtex/month. + *

+ * The opposite method is {@link DublinCoreExtractor#fillDublinCoreSchema()} + *

* * @return The bibEntry extracted from the document information. */ @@ -262,6 +272,7 @@ public Optional extractBibtexEntry() { this.extractBibTexFields(); // then extract all "standard" dublin core entries + this.extractType(); this.extractEditor(); this.extractAuthor(); this.extractDate(); @@ -272,7 +283,6 @@ public Optional extractBibtexEntry() { this.extractSource(); this.extractSubject(); this.extractTitle(); - this.extractType(); this.extractCoverage(); this.extractLanguages(); @@ -284,9 +294,7 @@ public Optional extractBibtexEntry() { } /** - * Bibtex-Fields used: editor, Field: 'dc:contributor' - * - * @param authors + * BibTeX: editor; DC: 'dc:contributor' */ private void fillContributor(String authors) { AuthorList list = AuthorList.parse(authors); @@ -296,20 +304,17 @@ private void fillContributor(String authors) { } /** - * Bibtex-Fields used: author, Field: 'dc:creator' - * - * @param creators + * BibTeX: author; DC: 'dc:creator' */ private void fillCreator(String creators) { AuthorList list = AuthorList.parse(creators); - for (Author author : list.getAuthors()) { dcSchema.addCreator(author.getFirstLast(false)); } } /** - * Bibtex-Fields used: year, month, Field: 'dc:date' + * BibTeX: year, month; DC: 'dc:date' */ private void fillDate() { bibEntry.getFieldOrAlias(StandardField.DATE) @@ -317,64 +322,52 @@ private void fillDate() { } /** - * Bibtex-Fields used: abstract, Field: 'dc:description' - * - * @param description + * BibTeX: abstract; DC: 'dc:description' */ private void fillDescription(String description) { dcSchema.setDescription(description); } /** - * Bibtex-Fields used: doi, Field: 'dc:identifier' - * - * @param identifier + * BibTeX:doi; DC: 'dc:identifier' */ private void fillIdentifier(String identifier) { dcSchema.setIdentifier(identifier); } /** - * Bibtex-Fields used: publisher, Field: dc:publisher - * - * @param publisher + * BibTeX: publisher, DC: dc:publisher */ private void fillPublisher(String publisher) { dcSchema.addPublisher(publisher); } /** - * Bibtex-Fields used: keywords, Field: 'dc:subject' - * - * @param value + * BibTeX: keywords; DC: 'dc:subject' */ private void fillKeywords(String value) { - String[] keywords = value.split(","); + String[] keywords = value.split(xmpPreferences.getKeywordSeparator().toString()); for (String keyword : keywords) { dcSchema.addSubject(keyword.trim()); } } /** - * Bibtex-Fields used: title, Field: 'dc:title' - * - * @param title + * BibTeX: title; DC: 'dc:title' */ private void fillTitle(String title) { dcSchema.setTitle(title); } /** - * BibTex : Coverage (Custom Field); DC Field : Coverage - * - * @param coverage + * BibTeX: Coverage (Custom Field); DC Field : Coverage */ private void fillCoverage(String coverage) { dcSchema.setCoverage(coverage); } /** - * BibTex Field : language ; DC Field : dc:language + * BibTeX: language; DC: dc:language */ private void fillLanguages(String languages) { Arrays.stream(languages.split(",")) @@ -382,99 +375,108 @@ private void fillLanguages(String languages) { } /** - * BibTex : Rights (Custom Field); DC Field : dc:rights + * BibTeX: Rights (Custom Field); DC: dc:rights */ private void fillRights(String rights) { dcSchema.addRights(null, rights.split(",")[0]); } /** - * BibTex : Source (Custom Field); DC Field : Source + * BibTeX: Source (Custom Field); DC: Source */ private void fillSource(String source) { dcSchema.setSource(source); } /** - * All others (+ citation key) get packaged in the relation attribute - * - * @param field Key of the metadata attribute - * @param value 
Value of the metadata attribute + * All others (+ citation key) get packaged in the dc:relation attribute with bibtex/ prefix in the content. + * The value of the given field is fetched from the class variable {@link DublinCoreExtractor#bibEntry}. */ - private void fillCustomField(Field field, String value) { + private void fillCustomField(Field field) { + // We write the plain content of the field, because this is a custom DC field content with the semantics that + // BibTeX data is stored. Thus, we do not need to get rid of BibTeX, but can keep it. + String value = bibEntry.getField(field).get(); dcSchema.addRelation("bibtex/" + field.getName() + '/' + value); } + /** + * Opposite method: {@link DublinCoreExtractor#extractType()} + */ + private void fillType() { + // BibTeX: entry type; DC: 'dc:type' + TypedBibEntry typedEntry = new TypedBibEntry(bibEntry, BibDatabaseMode.BIBTEX); + String typeForDisplay = typedEntry.getTypeForDisplay(); + if (!typeForDisplay.isEmpty()) { + dcSchema.addType(typeForDisplay); + } + } + + /** + * Converts the content of the bibEntry to dublin core. + *

+ * The opposite method is {@link DublinCoreExtractor#extractBibtexEntry()}. + *

+ * A similar method for writing the DocumentInformationItem (DII) is {@link XmpUtilWriter#writeDocumentInformation(PDDocument, BibEntry, BibDatabase)} + *

+ */ public void fillDublinCoreSchema() { // Query privacy filter settings boolean useXmpPrivacyFilter = xmpPreferences.shouldUseXmpPrivacyFilter(); - Set> fieldValues = new TreeSet<>(Comparator.comparing(fieldStringEntry -> fieldStringEntry.getKey().getName())); - fieldValues.addAll(bibEntry.getFieldMap().entrySet()); - boolean hasStandardYearField = fieldValues.stream().anyMatch(field -> StandardField.YEAR.equals(field.getKey())); - for (Entry field : fieldValues) { - if (useXmpPrivacyFilter && xmpPreferences.getXmpPrivacyFilter().contains(field.getKey())) { + SortedSet fields = new TreeSet<>(Comparator.comparing(field -> field.getName())); + fields.addAll(bibEntry.getFields()); + for (Field field : fields) { + if (useXmpPrivacyFilter && xmpPreferences.getXmpPrivacyFilter().contains(field)) { continue; } - Field fieldEntry = field.getKey(); - if (fieldEntry instanceof StandardField) { - switch ((StandardField) fieldEntry) { - case EDITOR: - this.fillContributor(field.getValue()); - break; - case AUTHOR: - this.fillCreator(field.getValue()); - break; - case YEAR: - this.fillDate(); - break; - case ABSTRACT: - this.fillDescription(field.getValue()); - break; - case DOI: - this.fillIdentifier(field.getValue()); - break; - case PUBLISHER: - this.fillPublisher(field.getValue()); - break; - case KEYWORDS: - this.fillKeywords(field.getValue()); - break; - case TITLE: - this.fillTitle(field.getValue()); - break; - case LANGUAGE: - this.fillLanguages(field.getValue()); - break; - case DAY: - case MONTH: - if (hasStandardYearField) { - break; + String value = unprotectTermsFormatter.format(bibEntry.getField(field).get()); + if (field instanceof StandardField standardField) { + switch (standardField) { + case EDITOR -> + this.fillContributor(value); + case AUTHOR -> + this.fillCreator(value); + case YEAR -> + this.fillDate(); + case ABSTRACT -> + this.fillDescription(value); + case DOI -> + this.fillIdentifier(value); + case PUBLISHER -> + this.fillPublisher(value); + case KEYWORDS -> + this.fillKeywords(value); + case TITLE -> + this.fillTitle(value); + case LANGUAGE -> + this.fillLanguages(value); + case FILE -> { + // we do not write the "file" field, because the file is the PDF itself + } + case DAY, MONTH -> { + // we do not write day and month separately if dc:year can be used + if (!bibEntry.hasField(StandardField.YEAR)) { + this.fillCustomField(field); } - default: - this.fillCustomField(field.getKey(), field.getValue()); + } + default -> + this.fillCustomField(field); } } else { - if (DC_COVERAGE.equals(fieldEntry.getName())) { - this.fillCoverage(field.getValue()); - } else if (DC_RIGHTS.equals(fieldEntry.getName())) { - this.fillRights(field.getValue()); - } else if (DC_SOURCE.equals(fieldEntry.getName())) { - this.fillSource(field.getValue()); + if (DC_COVERAGE.equals(field.getName())) { + this.fillCoverage(value); + } else if (DC_RIGHTS.equals(field.getName())) { + this.fillRights(value); + } else if (DC_SOURCE.equals(field.getName())) { + this.fillSource(value); } else { - this.fillCustomField(field.getKey(), field.getValue()); + this.fillCustomField(field); } } } dcSchema.setFormat("application/pdf"); - - // Bibtex-Fields used: entrytype, Field: 'dc:type' - TypedBibEntry typedEntry = new TypedBibEntry(bibEntry, BibDatabaseMode.BIBTEX); - String o = typedEntry.getTypeForDisplay(); - if (!o.isEmpty()) { - dcSchema.addType(o); - } + fillType(); } } diff --git a/src/main/java/org/jabref/logic/xmp/XmpUtilReader.java b/src/main/java/org/jabref/logic/xmp/XmpUtilReader.java index 
8051d301722..a5e4d009d32 100644 --- a/src/main/java/org/jabref/logic/xmp/XmpUtilReader.java +++ b/src/main/java/org/jabref/logic/xmp/XmpUtilReader.java @@ -29,7 +29,7 @@ public class XmpUtilReader { private static final String START_TAG = " readRawXmp(Path path) throws IOException { - try (PDDocument document = XmpUtilReader.loadWithAutomaticDecryption(path)) { - return XmpUtilReader.getXmpMetadata(document); + public List readRawXmp(Path path) throws IOException { + try (PDDocument document = loadWithAutomaticDecryption(path)) { + return getXmpMetadata(document); } } - /** - * Convenience method for readXMP(File). - * - * @param filename The filename from which to open the file. - * @return BibtexEntryies found in the PDF or an empty list - */ - public static List readXmp(String filename, XmpPreferences xmpPreferences) throws IOException { - return XmpUtilReader.readXmp(Path.of(filename), xmpPreferences); - } - /** * Try to read the given BibTexEntry from the XMP-stream of the given * inputstream containing a PDF-file. @@ -67,13 +57,13 @@ public static List readXmp(String filename, XmpPreferences xmpPreferen * @throws IOException Throws an IOException if the file cannot be read, so the user than remove a lock or cancel * the operation. */ - public static List readXmp(Path path, XmpPreferences xmpPreferences) + public List readXmp(Path path, XmpPreferences xmpPreferences) throws IOException { List result = new LinkedList<>(); try (PDDocument document = loadWithAutomaticDecryption(path)) { - List xmpMetaList = XmpUtilReader.getXmpMetadata(document); + List xmpMetaList = getXmpMetadata(document); if (!xmpMetaList.isEmpty()) { // Only support Dublin Core since JabRef 4.2 @@ -82,10 +72,7 @@ public static List readXmp(Path path, XmpPreferences xmpPreferences) if (dcSchema != null) { DublinCoreExtractor dcExtractor = new DublinCoreExtractor(dcSchema, xmpPreferences, new BibEntry()); Optional entry = dcExtractor.extractBibtexEntry(); - - if (entry.isPresent()) { - result.add(entry.get()); - } + entry.ifPresent(result::add); } } } @@ -111,7 +98,7 @@ public static List readXmp(Path path, XmpPreferences xmpPreferences) * * @return empty List if no metadata has been found, or cannot properly find start or end tag in metadata */ - private static List getXmpMetadata(PDDocument document) { + private List getXmpMetadata(PDDocument document) { PDDocumentCatalog catalog = document.getDocumentCatalog(); PDMetadata metaRaw = catalog.getMetadata(); List metaList = new ArrayList<>(); @@ -154,10 +141,9 @@ private static List getXmpMetadata(PDDocument document) { * @param path The path to load. 
* @throws IOException from the underlying @link PDDocument#load(File) */ - public static PDDocument loadWithAutomaticDecryption(Path path) throws IOException { + public PDDocument loadWithAutomaticDecryption(Path path) throws IOException { // try to load the document // also uses an empty string as default password - PDDocument doc = Loader.loadPDF(path.toFile()); - return doc; + return Loader.loadPDF(path.toFile()); } } diff --git a/src/main/java/org/jabref/logic/xmp/XmpUtilShared.java b/src/main/java/org/jabref/logic/xmp/XmpUtilShared.java index 5b741ec9d5f..7aa607917ed 100644 --- a/src/main/java/org/jabref/logic/xmp/XmpUtilShared.java +++ b/src/main/java/org/jabref/logic/xmp/XmpUtilShared.java @@ -25,7 +25,7 @@ private XmpUtilShared() { } protected static XMPMetadata parseXmpMetadata(InputStream is) throws IOException { - XMPMetadata meta = null; + XMPMetadata meta; try { DomXmpParser parser = new DomXmpParser(); meta = parser.parse(is); @@ -47,13 +47,13 @@ protected static XMPMetadata parseXmpMetadata(InputStream is) throws IOException */ public static boolean hasMetadata(Path path, XmpPreferences xmpPreferences) { try { - List bibEntries = XmpUtilReader.readXmp(path, xmpPreferences); + List bibEntries = new XmpUtilReader().readXmp(path, xmpPreferences); return !bibEntries.isEmpty(); } catch (EncryptedPdfsNotSupportedException ex) { LOGGER.info("Encryption not supported by XMPUtil"); return false; } catch (IOException e) { - XmpUtilShared.LOGGER.debug("No metadata was found. Path: " + path.toString()); + XmpUtilShared.LOGGER.debug("No metadata was found. Path: {}", path.toString()); return false; } } diff --git a/src/main/java/org/jabref/logic/xmp/XmpUtilWriter.java b/src/main/java/org/jabref/logic/xmp/XmpUtilWriter.java index 3365e08679d..7bd3c607990 100644 --- a/src/main/java/org/jabref/logic/xmp/XmpUtilWriter.java +++ b/src/main/java/org/jabref/logic/xmp/XmpUtilWriter.java @@ -7,16 +7,15 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Map.Entry; import java.util.function.Predicate; import java.util.stream.Collectors; import javax.xml.transform.TransformerException; import org.jabref.logic.exporter.EmbeddedBibFilePdfExporter; +import org.jabref.logic.formatter.casechanger.UnprotectTermsFormatter; import org.jabref.logic.util.io.FileUtil; import org.jabref.model.database.BibDatabase; import org.jabref.model.entry.BibEntry; @@ -35,35 +34,23 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +/** + * Writes given BibEntries into the XMP part of a PDF file. + * + * The conversion of a BibEntry to the XMP data (using Dublin Core) is done at + * {@link DublinCoreExtractor#fillDublinCoreSchema()} + */ public class XmpUtilWriter { private static final String XMP_BEGIN_END_TAG = "?xpacket"; private static final Logger LOGGER = LoggerFactory.getLogger(XmpUtilWriter.class); - /** - * Try to write the given BibTexEntry in the XMP-stream of the given - * PDF-file. - * - * Throws an IOException if the file cannot be read or written, so the user - * can remove a lock or cancel the operation. - * - * The method will overwrite existing BibTeX-XMP-data, but keep other - * existing metadata. - * - * This is a convenience method for writeXMP(File, BibEntry). - * - * @param fileName The filename from which to open the file. - * @param entry The entry to write. 
- * @param database maybenull An optional database which the given bibtex entries belong to, which will be used to - * resolve strings. If the database is null the strings will not be resolved. - * @throws TransformerException If the entry was malformed or unsupported. - * @throws IOException If the file could not be written to or could not be found. - */ - public static void writeXmp(String fileName, BibEntry entry, - BibDatabase database, XmpPreferences xmpPreferences) - throws IOException, TransformerException { - XmpUtilWriter.writeXmp(Path.of(fileName), entry, database, xmpPreferences); + private final UnprotectTermsFormatter unprotectTermsFormatter = new UnprotectTermsFormatter(); + private final XmpPreferences xmpPreferences; + + public XmpUtilWriter(XmpPreferences xmpPreferences) { + this.xmpPreferences = xmpPreferences; } /** @@ -80,17 +67,16 @@ public static void writeXmp(String fileName, BibEntry entry, * * @param file The path to write to. * @param entry The entry to write. - * @param database maybenull An optional database which the given bibtex entries belong to, which will be used to + * @param database An optional database which the given bibtex entries belong to, which will be used to * resolve strings. If the database is null the strings will not be resolved. * @throws TransformerException If the entry was malformed or unsupported. * @throws IOException If the file could not be written to or could not be found. */ - public static void writeXmp(Path file, BibEntry entry, - BibDatabase database, XmpPreferences xmpPreferences) + public void writeXmp(Path file, + BibEntry entry, + BibDatabase database) throws IOException, TransformerException { - List bibEntryList = new ArrayList<>(); - bibEntryList.add(entry); - XmpUtilWriter.writeXmp(file, bibEntryList, database, xmpPreferences); + writeXmp(file, List.of(entry), database); } /** @@ -99,27 +85,24 @@ public static void writeXmp(Path file, BibEntry entry, * * @param dcSchema Dublin core schema, which is filled with the bib entry. * @param entry The entry, which is added to the dublin core metadata. - * @param database maybenull An optional database which the given bibtex entries belong to, which will be used to + * @param database An optional database which the given bibtex entries belong to, which will be used to * resolve strings. If the database is null the strings will not be resolved. - * @param xmpPreferences The user's xmp preferences. */ - private static void writeToDCSchema(DublinCoreSchema dcSchema, BibEntry entry, BibDatabase database, - XmpPreferences xmpPreferences) { - - BibEntry resolvedEntry = XmpUtilWriter.getDefaultOrDatabaseEntry(entry, database); - - writeToDCSchema(dcSchema, resolvedEntry, xmpPreferences); + private void writeToDCSchema(DublinCoreSchema dcSchema, + BibEntry entry, + BibDatabase database) { + BibEntry resolvedEntry = getDefaultOrDatabaseEntry(entry, database); + writeToDCSchema(dcSchema, resolvedEntry); } /** - * Writes the information of the bib entry to the dublin core schema using - * a custom extractor. + * Writes the information of the bib entry to the dublin core schema using a custom extractor. * * @param dcSchema Dublin core schema, which is filled with the bib entry. * @param entry The entry, which is added to the dublin core metadata. - * @param xmpPreferences The user's xmp preferences. 
*/ - private static void writeToDCSchema(DublinCoreSchema dcSchema, BibEntry entry, XmpPreferences xmpPreferences) { + private void writeToDCSchema(DublinCoreSchema dcSchema, + BibEntry entry) { DublinCoreExtractor dcExtractor = new DublinCoreExtractor(dcSchema, xmpPreferences, entry); dcExtractor.fillDublinCoreSchema(); } @@ -131,11 +114,12 @@ private static void writeToDCSchema(DublinCoreSchema dcSchema, BibEntry entry, X * * @param document The pdf document to write to. * @param entries The BibTeX entries that are written as schemas - * @param database maybenull An optional database which the given BibTeX entries belong to, which will be used to + * @param database An optional database which the given BibTeX entries belong to, which will be used to * resolve strings. If the database is null the strings will not be resolved. */ - private static void writeDublinCore(PDDocument document, - List entries, BibDatabase database, XmpPreferences xmpPreferences) + private void writeDublinCore(PDDocument document, + List entries, + BibDatabase database) throws IOException, TransformerException { List resolvedEntries; @@ -167,7 +151,7 @@ private static void writeDublinCore(PDDocument document, for (BibEntry entry : resolvedEntries) { DublinCoreSchema dcSchema = DublinCoreSchemaCustom.copyDublinCoreSchema(meta.createAndAddDublinCoreSchema()); - XmpUtilWriter.writeToDCSchema(dcSchema, entry, null, xmpPreferences); + writeToDCSchema(dcSchema, entry, null); } // Save to stream and then input that stream to the PDF @@ -181,19 +165,17 @@ private static void writeDublinCore(PDDocument document, /** * This method generates an xmp metadata string in dublin core format. - *
* * @param entries A list of entries, which are added to the dublin core metadata. - * @param xmpPreferences The user's xmp preferences. * * @return If something goes wrong (e.g. an exception is thrown), the method returns an empty string, * otherwise it returns the xmp metadata as a string in dublin core format. */ - private static String generateXmpStringWithXmpDeclaration(List entries, XmpPreferences xmpPreferences) { + private String generateXmpStringWithXmpDeclaration(List entries) { XMPMetadata meta = XMPMetadata.createXMPMetadata(); for (BibEntry entry : entries) { DublinCoreSchema dcSchema = meta.createAndAddDublinCoreSchema(); - XmpUtilWriter.writeToDCSchema(dcSchema, entry, xmpPreferences); + writeToDCSchema(dcSchema, entry); } try (ByteArrayOutputStream os = new ByteArrayOutputStream()) { XmpSerializer serializer = new XmpSerializer(); @@ -217,13 +199,12 @@ private static String generateXmpStringWithXmpDeclaration(List entries *
* * @param entries A list of entries, which are added to the dublin core metadata. - * @param xmpPreferences The user's xmp preferences. * * @return If something goes wrong (e.g. an exception is thrown), the method returns an empty string, * otherwise it returns the xmp metadata without metadata description as a string in dublin core format. */ - public static String generateXmpStringWithoutXmpDeclaration(List entries, XmpPreferences xmpPreferences) { - String xmpContent = XmpUtilWriter.generateXmpStringWithXmpDeclaration(entries, xmpPreferences); + public String generateXmpStringWithoutXmpDeclaration(List entries) { + String xmpContent = generateXmpStringWithXmpDeclaration(entries); // remove the tags to enable the usage of the CTAN package xmpincl Predicate isBeginOrEndTag = s -> s.contains(XMP_BEGIN_END_TAG); @@ -235,30 +216,25 @@ public static String generateXmpStringWithoutXmpDeclaration(List entri /** * Try to write the given BibTexEntry in the Document Information (the * properties of the pdf). - * - * Existing fields values are overriden if the bibtex entry has the + *

+ * Existing field values are overridden if the bibtex entry has the * corresponding value set. + *

+ * The method to write Dublin Core is {@link DublinCoreExtractor#fillDublinCoreSchema()} * * @param document The pdf document to write to. * @param entry The Bibtex entry that is written into the PDF properties. * - * @param database maybenull An optional database which the given bibtex entries belong to, which will be used to + * @param database An optional database which the given bibtex entries belong to, which will be used to * resolve strings. If the database is null the strings will not be resolved. */ - private static void writeDocumentInformation(PDDocument document, - BibEntry entry, BibDatabase database, XmpPreferences xmpPreferences) { - + private void writeDocumentInformation(PDDocument document, + BibEntry entry, + BibDatabase database) { PDDocumentInformation di = document.getDocumentInformation(); + BibEntry resolvedEntry = getDefaultOrDatabaseEntry(entry, database); - BibEntry resolvedEntry = XmpUtilWriter.getDefaultOrDatabaseEntry(entry, database); - - // Query privacy filter settings boolean useXmpPrivacyFilter = xmpPreferences.shouldUseXmpPrivacyFilter(); - - // Set all the values including key and entryType - for (Entry fieldValuePair : resolvedEntry.getFieldMap().entrySet()) { - Field field = fieldValuePair.getKey(); - String fieldContent = fieldValuePair.getValue(); - + for (Field field : resolvedEntry.getFields()) { if (useXmpPrivacyFilter && xmpPreferences.getXmpPrivacyFilter().contains(field)) { // erase field instead of adding it if (StandardField.AUTHOR.equals(field)) { @@ -275,16 +251,20 @@ private static void writeDocumentInformation(PDDocument document, continue; } + // LaTeX content is removed from the string for "standard" fields in the PDF + String value = unprotectTermsFormatter.format(resolvedEntry.getField(field).get()); + if (StandardField.AUTHOR.equals(field)) { - di.setAuthor(fieldContent); + di.setAuthor(value); } else if (StandardField.TITLE.equals(field)) { - di.setTitle(fieldContent); + di.setTitle(value); } else if (StandardField.KEYWORDS.equals(field)) { - di.setKeywords(fieldContent); + di.setKeywords(value); } else if (StandardField.ABSTRACT.equals(field)) { - di.setSubject(fieldContent); + di.setSubject(value); } else { - di.setCustomMetadataValue("bibtex/" + field, fieldContent); + // We hit the case of a PDF-unsupported field --> write it directly + di.setCustomMetadataValue("bibtex/" + field, resolvedEntry.getField(field).get()); } } di.setCustomMetadataValue("bibtex/entrytype", resolvedEntry.getType().getDisplayName()); @@ -304,15 +284,14 @@ private static void writeDocumentInformation(PDDocument document, * * @param path The file to write the entries to. * @param bibtexEntries The entries to write to the file. * - * @param database maybenull An optional database which the given bibtex entries belong to, which will be used + * @param database An optional database which the given bibtex entries belong to, which will be used * to resolve strings. If the database is null the strings will not be resolved. - * @param xmpPreferences Write information also in PDF document properties * @throws TransformerException If the entry was malformed or unsupported. * @throws IOException If the file could not be written to or could not be found.
*/ - public static void writeXmp(Path path, - List bibtexEntries, BibDatabase database, - XmpPreferences xmpPreferences) + public void writeXmp(Path path, + List bibtexEntries, + BibDatabase database) throws IOException, TransformerException { List resolvedEntries; if (database == null) { @@ -332,8 +311,8 @@ public static void writeXmp(Path path, // Write schemas (PDDocumentInformation and DublinCoreSchema) to the document metadata if (resolvedEntries.size() > 0) { - XmpUtilWriter.writeDocumentInformation(document, resolvedEntries.get(0), null, xmpPreferences); - XmpUtilWriter.writeDublinCore(document, resolvedEntries, null, xmpPreferences); + writeDocumentInformation(document, resolvedEntries.get(0), null); + writeDublinCore(document, resolvedEntries, null); } // Save updates to original file @@ -348,7 +327,7 @@ public static void writeXmp(Path path, Files.delete(newFile); } - private static BibEntry getDefaultOrDatabaseEntry(BibEntry defaultEntry, BibDatabase database) { + private BibEntry getDefaultOrDatabaseEntry(BibEntry defaultEntry, BibDatabase database) { if (database == null) { return defaultEntry; } else { diff --git a/src/main/java/org/jabref/model/entry/BibEntry.java b/src/main/java/org/jabref/model/entry/BibEntry.java index 4371bc8270b..2643e777c84 100644 --- a/src/main/java/org/jabref/model/entry/BibEntry.java +++ b/src/main/java/org/jabref/model/entry/BibEntry.java @@ -597,7 +597,7 @@ public Optional clearField(Field field) { */ public Optional clearField(Field field, EntriesEventSource eventSource) { Optional oldValue = getField(field); - if (!oldValue.isPresent()) { + if (oldValue.isEmpty()) { return Optional.empty(); } @@ -777,7 +777,8 @@ public Optional removeKeywords(KeywordList keywordsToRemove, Charac return putKeywords(keywordList, keywordDelimiter); } - public Optional replaceKeywords(KeywordList keywordsToReplace, Keyword newValue, + public Optional replaceKeywords(KeywordList keywordsToReplace, + Keyword newValue, Character keywordDelimiter) { KeywordList keywordList = getKeywords(keywordDelimiter); keywordList.replaceAll(keywordsToReplace, newValue); @@ -835,6 +836,18 @@ public BibEntry withField(Field field, String value) { return this; } + public BibEntry withDate(Date date) { + setDate(date); + this.setChanged(false); + return this; + } + + public BibEntry withMonth(Month parsedMonth) { + setMonth(parsedMonth); + this.setChanged(false); + return this; + } + /* * Returns user comments (arbitrary text before the entry), if they exist. 
If not, returns the empty String */ diff --git a/src/test/java/org/jabref/logic/bibtex/comparator/EntryComparatorTest.java b/src/test/java/org/jabref/logic/bibtex/comparator/EntryComparatorTest.java index 4dd225aa275..cfaa38b42da 100644 --- a/src/test/java/org/jabref/logic/bibtex/comparator/EntryComparatorTest.java +++ b/src/test/java/org/jabref/logic/bibtex/comparator/EntryComparatorTest.java @@ -10,50 +10,52 @@ class EntryComparatorTest { - private BibEntry entry1 = new BibEntry(); - private BibEntry entry2 = new BibEntry(); - + @SuppressWarnings("EqualsWithItself") @Test - void recognizeIdenticObjectsAsEqual() { - BibEntry e2 = entry1; - assertEquals(0, new EntryComparator(false, false, StandardField.TITLE).compare(entry1, e2)); + void recognizeIdenticalObjectsAsEqual() { + BibEntry entry = new BibEntry(); + assertEquals(0, new EntryComparator(false, false, StandardField.TITLE).compare(entry, entry)); } @Test - void compareAuthorFieldBiggerAscending() throws Exception { - entry1.setField(StandardField.AUTHOR, "Stephen King"); - entry2.setField(StandardField.AUTHOR, "Henning Mankell"); - + void compareAuthorFieldBiggerAscending() { + BibEntry entry1 = new BibEntry() + .withField(StandardField.AUTHOR, "Stephen King"); + BibEntry entry2 = new BibEntry() + .withField(StandardField.AUTHOR, "Henning Mankell"); EntryComparator entryComparator = new EntryComparator(false, false, StandardField.AUTHOR); assertEquals(-2, entryComparator.compare(entry1, entry2)); } @Test - void bothEntriesHaveNotSetTheFieldToCompareAscending() throws Exception { - - entry1.setField(StandardField.BOOKTITLE, "Stark - The Dark Half (1989)"); - entry2.setField(StandardField.COMMENTATOR, "Some Commentator"); + void bothEntriesHaveNotSetTheFieldToCompareAscending() { + BibEntry entry1 = new BibEntry() + .withField(StandardField.BOOKTITLE, "Stark - The Dark Half (1989)"); + BibEntry entry2 = new BibEntry() + .withField(StandardField.COMMENTATOR, "Some Commentator"); EntryComparator entryComparator = new EntryComparator(false, false, StandardField.TITLE); assertEquals(-1, entryComparator.compare(entry1, entry2)); } @Test - void secondEntryHasNotSetFieldToCompareAscending() throws Exception { - - entry1.setField(StandardField.TITLE, "Stark - The Dark Half (1989)"); - entry2.setField(StandardField.COMMENTATOR, "Some Commentator"); + void secondEntryHasNotSetFieldToCompareAscending() { + BibEntry entry1 = new BibEntry() + .withField(StandardField.TITLE, "Stark - The Dark Half (1989)"); + BibEntry entry2 = new BibEntry() + .withField(StandardField.COMMENTATOR, "Some Commentator"); EntryComparator entryComparator = new EntryComparator(false, false, StandardField.TITLE); assertEquals(-1, entryComparator.compare(entry1, entry2)); } @Test - void firstEntryHasNotSetFieldToCompareAscending() throws Exception { - - entry1.setField(StandardField.COMMENTATOR, "Some Commentator"); - entry2.setField(StandardField.TITLE, "Stark - The Dark Half (1989)"); + void firstEntryHasNotSetFieldToCompareAscending() { + BibEntry entry1 = new BibEntry() + .withField(StandardField.COMMENTATOR, "Some Commentator"); + BibEntry entry2 = new BibEntry() + .withField(StandardField.TITLE, "Stark - The Dark Half (1989)"); EntryComparator entryComparator = new EntryComparator(false, false, StandardField.TITLE); @@ -61,10 +63,11 @@ void firstEntryHasNotSetFieldToCompareAscending() throws Exception { } @Test - void bothEntriesNumericAscending() throws Exception { - - entry1.setField(StandardField.EDITION, "1"); - entry2.setField(StandardField.EDITION, "3"); + 
void bothEntriesNumericAscending() { + BibEntry entry1 = new BibEntry() + .withField(StandardField.EDITION, "1"); + BibEntry entry2 = new BibEntry() + .withField(StandardField.EDITION, "3"); EntryComparator entryComparator = new EntryComparator(false, false, StandardField.EDITION); @@ -73,29 +76,29 @@ void bothEntriesNumericAscending() throws Exception { @Test void compareObjectsByKeyAscending() { - BibEntry e1 = new BibEntry(); - BibEntry e2 = new BibEntry(); - e1.setCitationKey("Mayer2019b"); - e2.setCitationKey("Mayer2019a"); + BibEntry e1 = new BibEntry() + .withCitationKey("Mayer2019b"); + BibEntry e2 = new BibEntry() + .withCitationKey("Mayer2019a"); assertEquals(1, new EntryComparator(false, false, InternalField.KEY_FIELD).compare(e1, e2)); assertEquals(-1, new EntryComparator(false, false, InternalField.KEY_FIELD).compare(e2, e1)); } @Test void compareObjectsByKeyWithNull() { - BibEntry e1 = new BibEntry(); + BibEntry e1 = new BibEntry() + .withCitationKey("Mayer2019b"); BibEntry e2 = new BibEntry(); - e1.setCitationKey("Mayer2019b"); assertEquals(-1, new EntryComparator(false, false, InternalField.KEY_FIELD).compare(e1, e2)); assertEquals(1, new EntryComparator(false, false, InternalField.KEY_FIELD).compare(e2, e1)); } @Test void compareObjectsByKeyWithBlank() { - BibEntry e1 = new BibEntry(); - BibEntry e2 = new BibEntry(); - e1.setCitationKey("Mayer2019b"); - e2.setCitationKey(" "); + BibEntry e1 = new BibEntry() + .withCitationKey("Mayer2019b"); + BibEntry e2 = new BibEntry() + .withCitationKey(" "); assertEquals(-1, new EntryComparator(false, false, InternalField.KEY_FIELD).compare(e1, e2)); assertEquals(1, new EntryComparator(false, false, InternalField.KEY_FIELD).compare(e2, e1)); } diff --git a/src/test/java/org/jabref/logic/exporter/XmpExporterTest.java b/src/test/java/org/jabref/logic/exporter/XmpExporterTest.java index 84af17aed90..05266a60aac 100644 --- a/src/test/java/org/jabref/logic/exporter/XmpExporterTest.java +++ b/src/test/java/org/jabref/logic/exporter/XmpExporterTest.java @@ -24,14 +24,12 @@ public class XmpExporterTest { private Exporter exporter; - private BibDatabaseContext databaseContext; + private final BibDatabaseContext databaseContext = new BibDatabaseContext(); private final XmpPreferences xmpPreferences = mock(XmpPreferences.class); @BeforeEach public void setUp() { exporter = new XmpExporter(xmpPreferences); - - databaseContext = new BibDatabaseContext(); } @Test @@ -44,21 +42,23 @@ public void exportSingleEntry(@TempDir Path testFolder) throws Exception { exporter.export(databaseContext, file, Collections.singletonList(entry)); String actual = String.join("\n", Files.readAllLines(file)); // we are using \n to join, so we need it in the expected string as well, \r\n would fail - String expected = " \n" + - " \n" + - " \n" + - " \n" + - " Alan Turing\n" + - " \n" + - " \n" + - " application/pdf\n" + - " \n" + - " \n" + - " Misc\n" + - " \n" + - " \n" + - " \n" + - " "; + String expected = """ + + + + + Alan Turing + + + application/pdf + + + Misc + + + + + """.stripTrailing(); assertEquals(expected, actual); } @@ -78,39 +78,41 @@ public void writeMultipleEntriesInASingleFile(@TempDir Path testFolder) throws E String actual = String.join("\n", Files.readAllLines(file)); // we are using \n to join, so we need it in the expected string as well, \r\n would fail - String expected = " \n" + - " \n" + - " \n" + - " \n" + - " Alan Turing\n" + - " \n" + - " \n" + - " application/pdf\n" + - " \n" + - " \n" + - " Misc\n" + - " \n" + - " \n" + - " \n" + - " \n" 
+ - " \n" + - " \n" + - " Michael Armbrust\n" + - " \n" + - " \n" + - " \n" + - " \n" + - " bibtex/citationkey/Armbrust2010\n" + - " \n" + - " \n" + - " application/pdf\n" + - " \n" + - " \n" + - " Misc\n" + - " \n" + - " \n" + - " \n" + - " "; + String expected = """ + + + + + Alan Turing + + + application/pdf + + + Misc + + + + + + + Michael Armbrust + + + + + bibtex/citationkey/Armbrust2010 + + + application/pdf + + + Misc + + + + + """.stripTrailing(); assertEquals(expected, actual); } @@ -122,61 +124,65 @@ public void writeMultipleEntriesInDifferentFiles(@TempDir Path testFolder) throw BibEntry entryTuring = new BibEntry() .withField(StandardField.AUTHOR, "Alan Turing"); - BibEntry entryArmbrust = new BibEntry() .withField(StandardField.AUTHOR, "Michael Armbrust") .withCitationKey("Armbrust2010"); exporter.export(databaseContext, file, List.of(entryTuring, entryArmbrust)); + // Nothing written in given file List lines = Files.readAllLines(file); assertEquals(Collections.emptyList(), lines); + // turing contains the turing entry only Path fileTuring = Path.of(file.getParent().toString(), entryTuring.getId() + "_null.xmp"); - String actualTuring = String.join("\n", Files.readAllLines(fileTuring)); // we are using \n to join, so we need it in the expected string as well, \r\n would fail - - String expectedTuring = " \n" + - " \n" + - " \n" + - " \n" + - " Alan Turing\n" + - " \n" + - " \n" + - " application/pdf\n" + - " \n" + - " \n" + - " Misc\n" + - " \n" + - " \n" + - " \n" + - " "; - + // we are using \n to join, so we need it in the expected string as well, \r\n would fail + String actualTuring = String.join("\n", Files.readAllLines(fileTuring)); + String expectedTuring = """ + + + + + Alan Turing + + + application/pdf + + + Misc + + + + + """.stripTrailing(); assertEquals(expectedTuring, actualTuring); + // armbrust contains the armbrust entry only Path fileArmbrust = Path.of(file.getParent().toString(), entryArmbrust.getId() + "_Armbrust2010.xmp"); - String actualArmbrust = String.join("\n", Files.readAllLines(fileArmbrust)); // we are using \n to join, so we need it in the expected string as well, \r\n would fail - - String expectedArmbrust = " \n" + - " \n" + - " \n" + - " \n" + - " Michael Armbrust\n" + - " \n" + - " \n" + - " \n" + - " \n" + - " bibtex/citationkey/Armbrust2010\n" + - " \n" + - " \n" + - " application/pdf\n" + - " \n" + - " \n" + - " Misc\n" + - " \n" + - " \n" + - " \n" + - " "; - + // we are using \n to join, so we need it in the expected string as well, \r\n would fail + String actualArmbrust = String.join("\n", Files.readAllLines(fileArmbrust)); + String expectedArmbrust = """ + + + + + Michael Armbrust + + + + + bibtex/citationkey/Armbrust2010 + + + application/pdf + + + Misc + + + + + """.stripTrailing(); assertEquals(expectedArmbrust, actualArmbrust); } @@ -188,21 +194,25 @@ public void exportSingleEntryWithPrivacyFilter(@TempDir Path testFolder) throws Path file = testFolder.resolve("ThisIsARandomlyNamedFile"); Files.createFile(file); - BibEntry entry = new BibEntry(); - entry.setField(StandardField.AUTHOR, "Alan Turing"); + BibEntry entry = new BibEntry() + .withField(StandardField.AUTHOR, "Alan Turing"); exporter.export(databaseContext, file, Collections.singletonList(entry)); + String actual = String.join("\n", Files.readAllLines(file)); - String expected = " \n" + - " \n" + - " application/pdf\n" + - " \n" + - " \n" + - " Misc\n" + - " \n" + - " \n" + - " \n" + - " "; + String expected = """ + + + application/pdf + + + Misc + + + + + 
""".stripTrailing(); + assertEquals(expected, actual); } } diff --git a/src/test/java/org/jabref/logic/xmp/XmpUtilReaderTest.java b/src/test/java/org/jabref/logic/xmp/XmpUtilReaderTest.java index 20d1b8c6ae1..074b4fbf34f 100644 --- a/src/test/java/org/jabref/logic/xmp/XmpUtilReaderTest.java +++ b/src/test/java/org/jabref/logic/xmp/XmpUtilReaderTest.java @@ -10,7 +10,6 @@ import org.jabref.logic.importer.ImportFormatPreferences; import org.jabref.logic.importer.fileformat.BibtexImporter; -import org.jabref.logic.importer.fileformat.BibtexParser; import org.jabref.model.entry.BibEntry; import org.jabref.model.entry.LinkedFile; import org.jabref.model.schema.DublinCoreSchemaCustom; @@ -29,15 +28,14 @@ class XmpUtilReaderTest { private XmpPreferences xmpPreferences; - private BibtexParser parser; private BibtexImporter testImporter; + private final XmpUtilReader xmpUtilReader = new XmpUtilReader(); /** * Create a temporary PDF-file with a single empty page. */ @BeforeEach void setUp() { - ImportFormatPreferences importFormatPreferences = mock(ImportFormatPreferences.class, Answers.RETURNS_DEEP_STUBS); xmpPreferences = mock(XmpPreferences.class); // The code assumes privacy filters to be off when(xmpPreferences.shouldUseXmpPrivacyFilter()).thenReturn(false); @@ -53,7 +51,7 @@ void setUp() { @Test void testReadArticleDublinCoreReadRawXmp() throws IOException, URISyntaxException { Path path = Path.of(XmpUtilShared.class.getResource("article_dublinCore_without_day.pdf").toURI()); - List meta = XmpUtilReader.readRawXmp(path); + List meta = xmpUtilReader.readRawXmp(path); DublinCoreSchema dcSchema = DublinCoreSchemaCustom.copyDublinCoreSchema(meta.get(0).getDublinCoreSchema()); DublinCoreExtractor dcExtractor = new DublinCoreExtractor(dcSchema, xmpPreferences, new BibEntry()); @@ -71,7 +69,7 @@ void testReadArticleDublinCoreReadRawXmp() throws IOException, URISyntaxExceptio @Test void testReadArticleDublinCoreReadXmp() throws IOException, URISyntaxException { Path pathPdf = Path.of(XmpUtilShared.class.getResource("article_dublinCore.pdf").toURI()); - List entries = XmpUtilReader.readXmp(pathPdf, xmpPreferences); + List entries = xmpUtilReader.readXmp(pathPdf, xmpPreferences); Path bibFile = Path.of(XmpUtilShared.class.getResource("article_dublinCore.bib").toURI()); List expected = testImporter.importDatabase(bibFile).getDatabase().getEntries(); @@ -86,11 +84,11 @@ void testReadArticleDublinCoreReadXmp() throws IOException, URISyntaxException { @Test void testReadArticleDublinCoreReadXmpPartialDate() throws IOException, URISyntaxException { Path pathPdf = Path.of(XmpUtilShared.class.getResource("article_dublinCore_partial_date.pdf").toURI()); - List entries = XmpUtilReader.readXmp(pathPdf, xmpPreferences); + List entries = xmpUtilReader.readXmp(pathPdf, xmpPreferences); Path bibFile = Path.of(XmpUtilShared.class.getResource("article_dublinCore_partial_date.bib").toURI()); List expected = testImporter.importDatabase(bibFile).getDatabase().getEntries(); - expected.forEach(bibEntry -> bibEntry.setFiles(Arrays.asList( + expected.forEach(bibEntry -> bibEntry.setFiles(List.of( new LinkedFile("", pathPdf.toAbsolutePath(), "PDF")) )); @@ -102,7 +100,7 @@ void testReadArticleDublinCoreReadXmpPartialDate() throws IOException, URISyntax */ @Test void testReadEmtpyMetadata() throws IOException, URISyntaxException { - List entries = XmpUtilReader.readXmp(Path.of(XmpUtilShared.class.getResource("empty_metadata.pdf").toURI()), xmpPreferences); + List entries = 
+        List<BibEntry> entries = xmpUtilReader.readXmp(Path.of(XmpUtilShared.class.getResource("empty_metadata.pdf").toURI()), xmpPreferences);
         assertEquals(Collections.emptyList(), entries);
     }
@@ -112,12 +110,12 @@ void testReadEmtpyMetadata() throws IOException, URISyntaxException {
     @Test
     void testReadPDMetadata() throws IOException, URISyntaxException {
         Path pathPdf = Path.of(XmpUtilShared.class.getResource("PD_metadata.pdf").toURI());
-        List<BibEntry> entries = XmpUtilReader.readXmp(pathPdf, xmpPreferences);
+        List<BibEntry> entries = xmpUtilReader.readXmp(pathPdf, xmpPreferences);
         Path bibFile = Path.of(XmpUtilShared.class.getResource("PD_metadata.bib").toURI());
         List<BibEntry> expected = testImporter.importDatabase(bibFile).getDatabase().getEntries();
-        expected.forEach(bibEntry -> bibEntry.setFiles(Arrays.asList(
+        expected.forEach(bibEntry -> bibEntry.setFiles(List.of(
                 new LinkedFile("", pathPdf.toAbsolutePath(), "PDF"))
         ));
@@ -129,7 +127,7 @@ void testReadPDMetadata() throws IOException, URISyntaxException {
      */
     @Test
     void testReadNoDescriptionMetadata() throws IOException, URISyntaxException {
-        List<BibEntry> entries = XmpUtilReader.readXmp(Path.of(XmpUtilShared.class.getResource("no_description_metadata.pdf").toURI()), xmpPreferences);
+        List<BibEntry> entries = xmpUtilReader.readXmp(Path.of(XmpUtilShared.class.getResource("no_description_metadata.pdf").toURI()), xmpPreferences);
         assertEquals(Collections.emptyList(), entries);
     }
 }
diff --git a/src/test/java/org/jabref/logic/xmp/XmpUtilWriterTest.java b/src/test/java/org/jabref/logic/xmp/XmpUtilWriterTest.java
index 537e0d08a42..20f3eb6906d 100644
--- a/src/test/java/org/jabref/logic/xmp/XmpUtilWriterTest.java
+++ b/src/test/java/org/jabref/logic/xmp/XmpUtilWriterTest.java
@@ -1,12 +1,9 @@
 package org.jabref.logic.xmp;
-import java.io.IOException;
 import java.nio.file.Path;
-import java.util.Arrays;
 import java.util.List;
-import javax.xml.transform.TransformerException;
-
+import org.jabref.logic.exporter.XmpExporterTest;
 import org.jabref.model.entry.BibEntry;
 import org.jabref.model.entry.Date;
 import org.jabref.model.entry.Month;
@@ -27,114 +24,184 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
+/**
+ * This tests writing to a PDF. To check the creation of the RDF content itself, please head to {@link XmpExporterTest}.
+ */
 class XmpUtilWriterTest {
-    private static BibEntry olly2018;
-    private static BibEntry toral2006;
-    private static BibEntry vapnik2000;
+    @TempDir
+    private Path tempDir;
+
+    private final BibEntry olly2018 = new BibEntry(StandardEntryType.Article)
+            .withCitationKey("Olly2018")
+            .withField(StandardField.AUTHOR, "Olly and Johannes")
+            .withField(StandardField.TITLE, "Stefan's palace")
+            .withField(StandardField.JOURNAL, "Test Journal")
+            .withField(StandardField.VOLUME, "1")
+            .withField(StandardField.NUMBER, "1")
+            .withField(StandardField.PAGES, "1-2")
+            .withMonth(Month.MARCH)
+            .withField(StandardField.ISSN, "978-123-123")
+            .withField(StandardField.NOTE, "NOTE")
+            .withField(StandardField.ABSTRACT, "ABSTRACT")
+            .withField(StandardField.COMMENT, "COMMENT")
+            .withField(StandardField.DOI, "10/3212.3123")
+            .withField(StandardField.FILE, ":article_dublinCore.pdf:PDF")
+            .withField(StandardField.GROUPS, "NO")
+            .withField(StandardField.HOWPUBLISHED, "online")
+            .withField(StandardField.KEYWORDS, "k1, k2")
+            .withField(StandardField.OWNER, "me")
+            .withField(StandardField.REVIEW, "review")
+            .withField(StandardField.URL, "https://www.olly2018.edu");
+
+    private final BibEntry toral2006 = new BibEntry(StandardEntryType.InProceedings)
+            .withField(StandardField.AUTHOR, "Antonio Toral and Rafael Munoz")
+            .withField(StandardField.TITLE, "A proposal to automatically build and maintain gazetteers for Named Entity Recognition by using Wikipedia")
+            .withField(StandardField.BOOKTITLE, "Proceedings of EACL")
+            .withField(StandardField.PAGES, "56--61")
+            .withField(StandardField.EPRINTTYPE, "asdf")
+            .withField(StandardField.OWNER, "Ich")
+            .withField(StandardField.URL, "www.url.de");
+    private final BibEntry vapnik2000 = new BibEntry(StandardEntryType.Book)
+            .withCitationKey("vapnik2000")
+            .withField(StandardField.TITLE, "The Nature of Statistical Learning Theory")
+            .withField(StandardField.PUBLISHER, "Springer Science + Business Media")
+            .withField(StandardField.AUTHOR, "Vladimir N. Vapnik")
Vapnik") + .withField(StandardField.DOI, "10.1007/978-1-4757-3264-1") + .withField(StandardField.OWNER, "Ich") + .withField(StandardField.LANGUAGE, "English, Japanese") + .withDate(new Date(2000, 5)) + .withField(new UnknownField(DC_COVERAGE), "coverageField") + .withField(new UnknownField((DC_SOURCE)), "JabRef") + .withField(new UnknownField(DC_RIGHTS), "Right To X"); private XmpPreferences xmpPreferences; - private void initBibEntries() { - olly2018 = new BibEntry(StandardEntryType.Article); - olly2018.setCitationKey("Olly2018"); - olly2018.setField(StandardField.AUTHOR, "Olly and Johannes"); - olly2018.setField(StandardField.TITLE, "Stefan's palace"); - olly2018.setField(StandardField.JOURNAL, "Test Journal"); - olly2018.setField(StandardField.VOLUME, "1"); - olly2018.setField(StandardField.NUMBER, "1"); - olly2018.setField(StandardField.PAGES, "1-2"); - olly2018.setMonth(Month.MARCH); - olly2018.setField(StandardField.ISSN, "978-123-123"); - olly2018.setField(StandardField.NOTE, "NOTE"); - olly2018.setField(StandardField.ABSTRACT, "ABSTRACT"); - olly2018.setField(StandardField.COMMENT, "COMMENT"); - olly2018.setField(StandardField.DOI, "10/3212.3123"); - olly2018.setField(StandardField.FILE, ":article_dublinCore.pdf:PDF"); - olly2018.setField(StandardField.GROUPS, "NO"); - olly2018.setField(StandardField.HOWPUBLISHED, "online"); - olly2018.setField(StandardField.KEYWORDS, "k1, k2"); - olly2018.setField(StandardField.OWNER, "me"); - olly2018.setField(StandardField.REVIEW, "review"); - olly2018.setField(StandardField.URL, "https://www.olly2018.edu"); - - toral2006 = new BibEntry(StandardEntryType.InProceedings); - toral2006.setField(StandardField.AUTHOR, "Toral, Antonio and Munoz, Rafael"); - toral2006.setField(StandardField.TITLE, "A proposal to automatically build and maintain gazetteers for Named Entity Recognition by using Wikipedia"); - toral2006.setField(StandardField.BOOKTITLE, "Proceedings of EACL"); - toral2006.setField(StandardField.PAGES, "56--61"); - toral2006.setField(StandardField.EPRINTTYPE, "asdf"); - toral2006.setField(StandardField.OWNER, "Ich"); - toral2006.setField(StandardField.URL, "www.url.de"); - - vapnik2000 = new BibEntry(StandardEntryType.Book); - vapnik2000.setCitationKey("vapnik2000"); - vapnik2000.setField(StandardField.TITLE, "The Nature of Statistical Learning Theory"); - vapnik2000.setField(StandardField.PUBLISHER, "Springer Science + Business Media"); - vapnik2000.setField(StandardField.AUTHOR, "Vladimir N. Vapnik"); - vapnik2000.setField(StandardField.DOI, "10.1007/978-1-4757-3264-1"); - vapnik2000.setField(StandardField.OWNER, "Ich"); - vapnik2000.setField(StandardField.LANGUAGE, "English, Japanese"); - vapnik2000.setDate(new Date(2000, 5)); - - vapnik2000.setField(new UnknownField(DC_COVERAGE), "coverageField"); - vapnik2000.setField(new UnknownField((DC_SOURCE)), "JabRef"); - vapnik2000.setField(new UnknownField(DC_RIGHTS), "Right To X"); - } - - /** - * Create a temporary PDF-file with a single empty page. - */ @BeforeEach void setUp() { xmpPreferences = mock(XmpPreferences.class); + when(xmpPreferences.getKeywordSeparator()).thenReturn(','); // The code assumes privacy filters to be off when(xmpPreferences.shouldUseXmpPrivacyFilter()).thenReturn(false); - - when(xmpPreferences.getKeywordSeparator()).thenReturn(','); - - this.initBibEntries(); } /** * Test for writing a PDF file with a single DublinCore metadata entry. 
-        // compare the two entries
-        assertEquals(entry, entryWritten);
+        assertEquals(List.of(entry), entriesWritten);
+    }
+
+    @Test
+    void olly2018Works() throws Exception {
+        singleEntryWorks(olly2018);
+    }
+
+    @Test
+    void toral2006Works() throws Exception {
+        singleEntryWorks(toral2006);
+    }
+
+    @Test
+    void vapnik2000Works() throws Exception {
+        singleEntryWorks(vapnik2000);
+    }
+
+    @Test
+    void testWriteTwoBibEntries(@TempDir Path tempDir) throws Exception {
+        Path pdfFile = this.createDefaultFile("JabRef_writeTwo.pdf", tempDir);
+        List<BibEntry> entries = List.of(olly2018, toral2006);
+        new XmpUtilWriter(xmpPreferences).writeXmp(pdfFile.toAbsolutePath(), entries, null);
+        List<BibEntry> entryList = new XmpUtilReader().readXmp(pdfFile.toAbsolutePath(), xmpPreferences);
+
+        // the file field is not written - and the read file field contains the PDF file name
+        // thus, we do not need to compare
+        entries.forEach(entry -> entry.clearField(StandardField.FILE));
+        entryList.forEach(entry -> entry.clearField(StandardField.FILE));
+
+        assertEquals(entries, entryList);
+    }
+
+    @Test
+    void testWriteThreeBibEntries(@TempDir Path tempDir) throws Exception {
+        Path pdfFile = this.createDefaultFile("JabRef_writeThree.pdf", tempDir);
+        List<BibEntry> entries = List.of(olly2018, vapnik2000, toral2006);
+        new XmpUtilWriter(xmpPreferences).writeXmp(pdfFile.toAbsolutePath(), entries, null);
+        List<BibEntry> entryList = new XmpUtilReader().readXmp(pdfFile.toAbsolutePath(), xmpPreferences);
+
+        // the file field is not written - and the read file field contains the PDF file name
+        // thus, we do not need to compare
+        entries.forEach(entry -> entry.clearField(StandardField.FILE));
+        entryList.forEach(entry -> entry.clearField(StandardField.FILE));
+
+        assertEquals(entries, entryList);
+    }
+
+    @Test
+    void protectingBracesAreRemovedAtTitle(@TempDir Path tempDir) throws Exception {
+        Path pdfFile = this.createDefaultFile("JabRef_writeBraces.pdf", tempDir);
+        BibEntry original = new BibEntry()
+                .withField(StandardField.TITLE, "Some {P}rotected {T}erm");
+        new XmpUtilWriter(xmpPreferences).writeXmp(pdfFile.toAbsolutePath(), List.of(original), null);
+        List<BibEntry> entryList = new XmpUtilReader().readXmp(pdfFile.toAbsolutePath(), xmpPreferences);
+
+        entryList.forEach(entry -> entry.clearField(StandardField.FILE));
+
+        BibEntry expected = new BibEntry()
+                .withField(StandardField.TITLE, "Some Protected Term");
+        assertEquals(List.of(expected), entryList);
     }
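+
+    // counterpart to protectingBracesAreRemovedAtTitle: braces in the pages field are left untouched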
-    /**
-     * Test, which writes multiple metadata entries to a PDF and reads them again to test the size.
-     */
     @Test
-    void testWriteMultipleBibEntries(@TempDir Path tempDir) throws IOException, TransformerException {
-        Path pdfFile = this.createDefaultFile("JabRef_writeMultiple.pdf", tempDir);
+    void protectingBracesAreKeptAtPages(@TempDir Path tempDir) throws Exception {
+        Path pdfFile = this.createDefaultFile("JabRef_writeBraces.pdf", tempDir);
+        BibEntry original = new BibEntry()
+                .withField(StandardField.PAGES, "{55}-{99}");
+        new XmpUtilWriter(xmpPreferences).writeXmp(pdfFile.toAbsolutePath(), List.of(original), null);
+        List<BibEntry> entryList = new XmpUtilReader().readXmp(pdfFile.toAbsolutePath(), xmpPreferences);
-        List<BibEntry> entries = Arrays.asList(olly2018, vapnik2000, toral2006);
+        entryList.forEach(entry -> entry.clearField(StandardField.FILE));
-        XmpUtilWriter.writeXmp(Path.of(pdfFile.toAbsolutePath().toString()), entries, null, xmpPreferences);
+        assertEquals(List.of(original), entryList);
+    }
+
+    @Test
+    void doubleDashAtPageNumberIsKept(@TempDir Path tempDir) throws Exception {
+        Path pdfFile = this.createDefaultFile("JabRef_writeBraces.pdf", tempDir);
+        BibEntry original = new BibEntry()
+                .withField(StandardField.PAGES, "2--33");
+        new XmpUtilWriter(xmpPreferences).writeXmp(pdfFile.toAbsolutePath(), List.of(original), null);
+        List<BibEntry> entryList = new XmpUtilReader().readXmp(pdfFile.toAbsolutePath(), xmpPreferences);
+
+        entryList.forEach(entry -> entry.clearField(StandardField.FILE));
+
+        assertEquals(List.of(original), entryList);
+    }
-        List<BibEntry> entryList = XmpUtilReader.readXmp(Path.of(pdfFile.toAbsolutePath().toString()), xmpPreferences);
-        assertEquals(3, entryList.size());
+    @Test
+    void singleEntry(@TempDir Path tempDir) throws Exception {
+        Path pdfFile = this.createDefaultFile("JabRef.pdf", tempDir);
+        new XmpUtilWriter(xmpPreferences).writeXmp(pdfFile.toAbsolutePath(), List.of(vapnik2000), null);
+        List<BibEntry> entryList = new XmpUtilReader().readXmp(pdfFile.toAbsolutePath(), xmpPreferences);
+
+        vapnik2000.clearField(StandardField.FILE);
+        entryList.forEach(entry -> entry.clearField(StandardField.FILE));
+        assertEquals(List.of(vapnik2000), entryList);
     }
-    private Path createDefaultFile(String fileName, Path tempDir) throws IOException {
+    /**
+     * Creates a temporary PDF-file with a single empty page.
+     */
+    private Path createDefaultFile(String fileName, Path tempDir) throws Exception {
         // create a default PDF
         Path pdfFile = tempDir.resolve(fileName);
         try (PDDocument pdf = new PDDocument()) {
@@ -142,7 +209,6 @@ private Path createDefaultFile(String fileName, Path tempDir) throws IOException
             pdf.addPage(new PDPage());
             pdf.save(pdfFile.toAbsolutePath().toString());
         }
-
         return pdfFile;
     }
 }