diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f1277ee13f..ff69d86f76d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ Note that this project **does not** adhere to [Semantic Versioning](http://semve ### Added - We added a field showing the BibTeX/biblatex source for added and deleted entries in the "External Changes Resolver" dialog. [#9509](https://github.com/JabRef/jabref/issues/9509) +- We added a search history list in the search field's right click menu [#7906](https://github.com/JabRef/jabref/issues/7906) - We added a full text fetcher for IACR eprints. [#9651](https://github.com/JabRef/jabref/pull/9651) - We added "Attach file from URL" to right-click context menu to download and store a file with the reference library. [#9646](https://github.com/JabRef/jabref/issues/9646) - We enabled updating an existing entry with data from InspireHEP. [#9351](https://github.com/JabRef/jabref/issues/9351) @@ -23,8 +24,16 @@ Note that this project **does not** adhere to [Semantic Versioning](http://semve ### Changed - 'Get full text' now also checks the file url. [#568](https://github.com/koppor/jabref/issues/568) +- `log.txt` now contains an entry if a BibTeX entry could not be parsed. +- JabRef writes a new backup file only if there is a change. Before, JabRef created a backup upon start. [#9679](https://github.com/JabRef/jabref/pull/9679) +- We modified the `Add Group` dialog to use the most recently selected group hierarchical context. [#9141](https://github.com/JabRef/jabref/issues/9141) - We refined the 'main directory not found' error message. [#9625](https://github.com/JabRef/jabref/pull/9625) -- We modified the `Add Group` dialog to use the most recently selected group hierarchical context [#9141](https://github.com/JabRef/jabref/issues/9141) +- JabRef writes a new backup file only if there is a change. Before, JabRef created a backup upon start. 
[#9679](https://github.com/JabRef/jabref/pull/9679) +- Backups of libraries are not stored per JabRef version, but collected together. +- We streamlined the paths for logs and backups: The parent path fragment is always `logs` or `backups`. +- `log.txt` now contains debug messages. Debugging needs to be enabled explicitly. [#9678](https://github.com/JabRef/jabref/pull/9678) +- `log.txt` does not contain entries for non-found files during PDF indexing. [#9678](https://github.com/JabRef/jabref/pull/9678) +- We improved the Medline importer to correctly import ISO dates for `revised`. [#9536](https://github.com/JabRef/jabref/issues/9536) @@ -37,6 +46,7 @@ Note that this project **does not** adhere to [Semantic Versioning](http://semve - We fixed an issue where the command line export using `--exportMatches` flag does not create an output bib file [#9581](https://github.com/JabRef/jabref/issues/9581) - We fixed an issue where custom field in the custom entry types could not be set to mulitline [#9609](https://github.com/JabRef/jabref/issues/9609) - We fixed an issue where the Office XML exporter did not resolve BibTeX-Strings when exporting entries [forum#3741](https://discourse.jabref.org/t/exporting-bibtex-constant-strings-to-ms-office-2007-xml/3741) +- JabRef is now more relaxed when parsing field content: In case a field content ended with `\`, the combination `\}` was treated as plain `}`. 
[#9668](https://github.com/JabRef/jabref/issues/9668) ### Removed diff --git a/build.gradle b/build.gradle index 4ff7932d690..719814fdb0f 100644 --- a/build.gradle +++ b/build.gradle @@ -271,8 +271,6 @@ processResources { task generateSource(dependsOn: ["generateBstGrammarSource", "generateSearchGrammarSource", - "generateMedlineSource", - "generateBibtexmlSource", "generateEndnoteSource", "generateModsSource", "generateCitaviSource"]) { @@ -302,24 +300,6 @@ tasks.register("generateSearchGrammarSource", JavaExec) { args = ["-o","src-gen/main/java/org/jabref/search" , "-visitor", "-no-listener", "-package", "org.jabref.search", "$projectDir/src/main/antlr4/org/jabref/search/Search.g4"] } -task generateMedlineSource(type: XjcTask) { - group = 'JabRef' - description = "Generates java files for the medline importer." - - schemaFile = "src/main/resources/xjc/medline/medline.xsd" - outputDirectory = "src-gen/main/java" - javaPackage = "org.jabref.logic.importer.fileformat.medline" -} - -task generateBibtexmlSource(type: XjcTask) { - group = 'JabRef' - description = "Generates java files for the bibtexml importer." - - schemaFile = "src/main/resources/xjc/bibtexml/bibtexml.xsd" - outputDirectory = "src-gen/main/java/" - javaPackage = "org.jabref.logic.importer.fileformat.bibtexml" -} - task generateEndnoteSource(type: XjcTask) { group = 'JabRef' description = "Generates java files for the endnote importer." diff --git a/docs/code-howtos/logging.md b/docs/code-howtos/logging.md index 741600cd625..09b252b525e 100644 --- a/docs/code-howtos/logging.md +++ b/docs/code-howtos/logging.md @@ -5,20 +5,44 @@ parent: Code Howtos JabRef uses the logging facade [SLF4j](https://www.slf4j.org). All log messages are passed internally to [tinylog](https://tinylog.org/v2/) which handles any filtering, formatting and writing of log messages. 
-* Obtaining a logger for a class: +Obtaining a logger for a class: - ```java - private static final Logger LOGGER = LoggerFactory.getLogger(.class); - ``` +```java +private static final Logger LOGGER = LoggerFactory.getLogger(.class); +``` -* If the logging event is caused by an exception, please add the exception to the log message as: +Please always use `LOGGER.debug` for debugging. - ```java - catch (SomeException e) { - LOGGER.warn("Warning text.", e); - ... - } - ``` +Example: + +```java +String example = "example"; +LOGGER.debug("Some state {}", example); +``` + +Enable logging in `tinylog.properties`: + +```properties +level@org.jabref.example.ExampleClass = debug +``` + +If the logging event is caused by an exception, please add the exception to the log message as: + +```java + catch (SomeException e) { + LOGGER.warn("Warning text.", e); + ... + } +``` + +When running tests, `tinylog-test.properties` is used. +It is located under `src/test/resources`. As default, only `info` is logged. +When developing, it makes sense to use `debug` as log level. +One can change the log level per class using the pattern `level@class=debug`. +In the `.properties` file, this is done for `org.jabref.model.entry.BibEntry`. + +## Further reading + +SLF4J also supports parameterized logging, e.g. if you want to print out multiple arguments in a log statement use a pair of curly braces (`{}`). +Head to <https://www.slf4j.org/faq.html#logging_performance> for examples. -* SLF4J also support parameterized logging, e.g. if you want to print out multiple arguments in a log statement use a pair of curly braces. [Examples](https://www.slf4j.org/faq.html#logging\_performance) -* When running tests, `tinylog-test.properties` is used. It is located under `src/test/resources`. As default, only `info` is logged. When developing, it makes sense to use `debug` as log level. One can change the log level per class using the pattern `level@class=debug` is set to `debug`. 
In the `.properties` file, this is done for `org.jabref.model.entry.BibEntry`. diff --git a/src/main/java/org/jabref/cli/Launcher.java b/src/main/java/org/jabref/cli/Launcher.java index 8e8b4e956ad..8a95d5e299e 100644 --- a/src/main/java/org/jabref/cli/Launcher.java +++ b/src/main/java/org/jabref/cli/Launcher.java @@ -24,6 +24,7 @@ import org.jabref.logic.remote.client.RemoteClient; import org.jabref.logic.shared.restserver.rest.Root; import org.jabref.logic.util.BuildInfo; +import org.jabref.logic.util.OS; import org.jabref.migrations.PreferencesMigrations; import org.jabref.model.database.BibDatabaseContext; import org.jabref.model.database.BibDatabaseMode; @@ -118,10 +119,12 @@ static void startServer() { * the log configuration programmatically anymore. */ private static void addLogToDisk() { - Path directory = Path.of(AppDirsFactory.getInstance().getUserLogDir( - "jabref", - new BuildInfo().version.toString(), - "org.jabref")); + Path directory = Path.of(AppDirsFactory.getInstance() + .getUserDataDir( + OS.APP_DIR_APP_NAME, + "logs", + OS.APP_DIR_APP_AUTHOR)) + .resolve(new BuildInfo().version.toString()); try { Files.createDirectories(directory); } catch (IOException e) { @@ -133,7 +136,7 @@ private static void addLogToDisk() { // https://tinylog.org/v2/configuration/#shared-file-writer Map configuration = Map.of( "writerFile", "shared file", - "writerFile.level", "info", + "writerFile.level", "debug", "writerFile.file", directory.resolve("log.txt").toString(), "writerFile.charset", "UTF-8"); diff --git a/src/main/java/org/jabref/gui/ClipBoardManager.java b/src/main/java/org/jabref/gui/ClipBoardManager.java index a75dd1798d9..b91d1be858b 100644 --- a/src/main/java/org/jabref/gui/ClipBoardManager.java +++ b/src/main/java/org/jabref/gui/ClipBoardManager.java @@ -87,7 +87,7 @@ public static String getContents() { return result; } - public Optional getBibTeXEntriesFromClipbaord() { + public Optional getBibTeXEntriesFromClipboard() { return 
Optional.ofNullable(clipboard.getContent(DragAndDropDataFormats.ENTRIES)).map(String.class::cast); } diff --git a/src/main/java/org/jabref/gui/StateManager.java b/src/main/java/org/jabref/gui/StateManager.java index d7c3a797d9c..fffab647dd0 100644 --- a/src/main/java/org/jabref/gui/StateManager.java +++ b/src/main/java/org/jabref/gui/StateManager.java @@ -71,6 +71,8 @@ public class StateManager { private final ObjectProperty lastAutomaticFieldEditorEdit = new SimpleObjectProperty<>(); + private final ObservableList searchHistory = FXCollections.observableArrayList(); + public StateManager() { activeGroups.bind(Bindings.valueAt(selectedGroups, activeDatabase.orElseOpt(null))); } @@ -212,4 +214,25 @@ public List collectAllDatabasePaths() { () -> list.add(""))); return list; } + + public void addSearchHistory(String search) { + searchHistory.remove(search); + searchHistory.add(search); + } + + public ObservableList getWholeSearchHistory() { + return searchHistory; + } + + public List getLastSearchHistory(int size) { + int sizeSearches = searchHistory.size(); + if (size < sizeSearches) { + return searchHistory.subList(sizeSearches - size, sizeSearches); + } + return searchHistory; + } + + public void clearSearchHistory() { + searchHistory.clear(); + } } diff --git a/src/main/java/org/jabref/gui/backup/BackupResolverDialog.java b/src/main/java/org/jabref/gui/backup/BackupResolverDialog.java index 64f24662ecf..bdd2fbd2d16 100644 --- a/src/main/java/org/jabref/gui/backup/BackupResolverDialog.java +++ b/src/main/java/org/jabref/gui/backup/BackupResolverDialog.java @@ -32,7 +32,7 @@ public BackupResolverDialog(Path originalPath) { getDialogPane().setMinHeight(180); getDialogPane().getButtonTypes().setAll(RESTORE_FROM_BACKUP, REVIEW_BACKUP, IGNORE_BACKUP); - Optional backupPathOpt = BackupFileUtil.getPathOfLatestExisingBackupFile(originalPath, BackupFileType.BACKUP); + Optional backupPathOpt = BackupFileUtil.getPathOfLatestExistingBackupFile(originalPath, 
BackupFileType.BACKUP); String backupFilename = backupPathOpt.map(Path::getFileName).map(Path::toString).orElse(Localization.lang("File not found")); String content = new StringBuilder() .append(Localization.lang("A backup file for '%0' was found at [%1]", diff --git a/src/main/java/org/jabref/gui/dialogs/BackupUIManager.java b/src/main/java/org/jabref/gui/dialogs/BackupUIManager.java index 3c0536bf4af..ceba43dd088 100644 --- a/src/main/java/org/jabref/gui/dialogs/BackupUIManager.java +++ b/src/main/java/org/jabref/gui/dialogs/BackupUIManager.java @@ -63,7 +63,7 @@ private static Optional showReviewBackupDialog(DialogService dialo // This will be modified by using the `DatabaseChangesResolverDialog`. BibDatabaseContext originalDatabase = originalParserResult.getDatabaseContext(); - Path backupPath = BackupFileUtil.getPathOfLatestExisingBackupFile(originalPath, BackupFileType.BACKUP).orElseThrow(); + Path backupPath = BackupFileUtil.getPathOfLatestExistingBackupFile(originalPath, BackupFileType.BACKUP).orElseThrow(); BibDatabaseContext backupDatabase = OpenDatabase.loadDatabase(backupPath, importFormatPreferences, new DummyFileUpdateMonitor()).getDatabaseContext(); DatabaseChangeResolverFactory changeResolverFactory = new DatabaseChangeResolverFactory(dialogService, originalDatabase, preferencesService.getBibEntryPreferences()); diff --git a/src/main/java/org/jabref/gui/edit/EditAction.java b/src/main/java/org/jabref/gui/edit/EditAction.java index c02bc2dd619..9942c6b5d29 100644 --- a/src/main/java/org/jabref/gui/edit/EditAction.java +++ b/src/main/java/org/jabref/gui/edit/EditAction.java @@ -1,5 +1,6 @@ package org.jabref.gui.edit; +import javafx.scene.control.TextField; import javafx.scene.control.TextInputControl; import javafx.scene.web.WebView; @@ -22,6 +23,7 @@ public class EditAction extends SimpleCommand { private static final Logger LOGGER = LoggerFactory.getLogger(EditAction.class); private final JabRefFrame frame; + private TextField text; private final 
StandardActions action; private final StateManager stateManager; @@ -52,8 +54,12 @@ public void execute() { // DELETE_ENTRY in text field should do forward delete switch (action) { case COPY -> textInput.copy(); + case UNDO -> textInput.undo(); + case REDO -> textInput.redo(); case CUT -> textInput.cut(); case PASTE -> textInput.paste(); + case DELETE -> textInput.clear(); + case SELECT_ALL -> textInput.selectAll(); case DELETE_ENTRY -> textInput.deleteNextChar(); default -> throw new IllegalStateException("Only cut/copy/paste supported in TextInputControl but got " + action); } diff --git a/src/main/java/org/jabref/gui/externalfiles/ImportHandler.java b/src/main/java/org/jabref/gui/externalfiles/ImportHandler.java index 959d45ba5e5..d2da01c37b4 100644 --- a/src/main/java/org/jabref/gui/externalfiles/ImportHandler.java +++ b/src/main/java/org/jabref/gui/externalfiles/ImportHandler.java @@ -258,7 +258,7 @@ private void generateKeys(List entries) { } public List handleBibTeXData(String entries) { - BibtexParser parser = new BibtexParser(preferencesService.getImportFormatPreferences(), Globals.getFileUpdateMonitor()); + BibtexParser parser = new BibtexParser(preferencesService.getImportFormatPreferences(), fileUpdateMonitor); try { return parser.parseEntries(new ByteArrayInputStream(entries.getBytes(StandardCharsets.UTF_8))); } catch (ParseException ex) { diff --git a/src/main/java/org/jabref/gui/maintable/MainTable.java b/src/main/java/org/jabref/gui/maintable/MainTable.java index d5951089c1d..7f1bd38c38b 100644 --- a/src/main/java/org/jabref/gui/maintable/MainTable.java +++ b/src/main/java/org/jabref/gui/maintable/MainTable.java @@ -322,9 +322,9 @@ private void clearAndSelectLast() { public void paste() { List entriesToAdd; - entriesToAdd = this.clipBoardManager.getBibTeXEntriesFromClipbaord() + entriesToAdd = this.clipBoardManager.getBibTeXEntriesFromClipboard() .map(importHandler::handleBibTeXData) - .orElseGet(this::handleNonBibteXStringData); + 
.orElseGet(this::handleNonBibTeXStringData); for (BibEntry entry : entriesToAdd) { importHandler.importEntryWithDuplicateCheck(database, entry); @@ -334,7 +334,7 @@ public void paste() { } } - private List handleNonBibteXStringData() { + private List handleNonBibTeXStringData() { String data = ClipBoardManager.getContents(); List entries = new ArrayList<>(); try { diff --git a/src/main/java/org/jabref/gui/search/GlobalSearchBar.java b/src/main/java/org/jabref/gui/search/GlobalSearchBar.java index d37c5b2657f..0f7cb05553b 100644 --- a/src/main/java/org/jabref/gui/search/GlobalSearchBar.java +++ b/src/main/java/org/jabref/gui/search/GlobalSearchBar.java @@ -14,6 +14,8 @@ import javafx.beans.binding.BooleanBinding; import javafx.beans.property.BooleanProperty; import javafx.beans.property.SimpleBooleanProperty; +import javafx.collections.ListChangeListener; +import javafx.collections.ObservableList; import javafx.css.PseudoClass; import javafx.event.Event; import javafx.geometry.Insets; @@ -138,6 +140,19 @@ public GlobalSearchBar(JabRefFrame frame, StateManager stateManager, Preferences } }); + searchField.setContextMenu(SearchFieldRightClickMenu.create( + keyBindingRepository, + stateManager, + searchField)); + + ObservableList search = stateManager.getWholeSearchHistory(); + search.addListener((ListChangeListener.Change change) -> { + searchField.setContextMenu(SearchFieldRightClickMenu.create( + keyBindingRepository, + stateManager, + searchField)); + }); + ClipBoardManager.addX11Support(searchField); regularExpressionButton = IconTheme.JabRefIcons.REG_EX.asToggleButton(); @@ -292,6 +307,7 @@ public void performSearch() { informUserAboutInvalidSearchQuery(); return; } + this.stateManager.addSearchHistory(searchField.textProperty().get()); stateManager.setSearchQuery(searchQuery); } diff --git a/src/main/java/org/jabref/gui/search/SearchFieldRightClickMenu.java b/src/main/java/org/jabref/gui/search/SearchFieldRightClickMenu.java new file mode 100644 index 
00000000000..2e7a0156276 --- /dev/null +++ b/src/main/java/org/jabref/gui/search/SearchFieldRightClickMenu.java @@ -0,0 +1,72 @@ +package org.jabref.gui.search; + +import javafx.scene.control.ContextMenu; +import javafx.scene.control.Menu; +import javafx.scene.control.MenuItem; +import javafx.scene.control.SeparatorMenuItem; + +import org.jabref.gui.StateManager; +import org.jabref.gui.actions.ActionFactory; +import org.jabref.gui.actions.SimpleCommand; +import org.jabref.gui.actions.StandardActions; +import org.jabref.gui.edit.EditAction; +import org.jabref.gui.keyboard.KeyBindingRepository; +import org.jabref.logic.l10n.Localization; + +import org.controlsfx.control.textfield.CustomTextField; + +public class SearchFieldRightClickMenu { + public static ContextMenu create(KeyBindingRepository keyBindingRepository, + StateManager stateManager, + CustomTextField searchField) { + ActionFactory factory = new ActionFactory(keyBindingRepository); + ContextMenu contextMenu = new ContextMenu(); + + contextMenu.getItems().addAll( + factory.createMenuItem(StandardActions.UNDO, new EditAction(StandardActions.UNDO, null, stateManager)), + factory.createMenuItem(StandardActions.REDO, new EditAction(StandardActions.REDO, null, stateManager)), + factory.createMenuItem(StandardActions.CUT, new EditAction(StandardActions.CUT, null, stateManager)), + factory.createMenuItem(StandardActions.COPY, new EditAction(StandardActions.COPY, null, stateManager)), + factory.createMenuItem(StandardActions.PASTE, new EditAction(StandardActions.PASTE, null, stateManager)), + factory.createMenuItem(StandardActions.DELETE, new EditAction(StandardActions.DELETE, null, stateManager)), + + new SeparatorMenuItem(), + + factory.createMenuItem(StandardActions.SELECT_ALL, new EditAction(StandardActions.SELECT_ALL, null, stateManager)), + createSearchFromHistorySubMenu(factory, stateManager, searchField) + ); + + return contextMenu; + } + + private static Menu createSearchFromHistorySubMenu(ActionFactory 
factory, + StateManager stateManager, + CustomTextField searchField) { + Menu searchFromHistorySubMenu = factory.createMenu(() -> Localization.lang("Search from history...")); + + int num = stateManager.getLastSearchHistory(10).size(); + if (num == 0) { + MenuItem item = new MenuItem(Localization.lang("your search history is empty")); + searchFromHistorySubMenu.getItems().addAll(item); + } else { + for (int i = 0; i < num; i++) { + int finalI = i; + MenuItem item = factory.createMenuItem(() -> stateManager.getLastSearchHistory(10).get(finalI), new SimpleCommand() { + @Override + public void execute() { + searchField.setText(stateManager.getLastSearchHistory(10).get(finalI)); + } + }); + searchFromHistorySubMenu.getItems().addAll(item); + } + MenuItem clear = factory.createMenuItem(() -> Localization.lang("Clear history"), new SimpleCommand() { + @Override + public void execute() { + stateManager.clearSearchHistory(); + } + }); + searchFromHistorySubMenu.getItems().addAll(new SeparatorMenuItem(), clear); + } + return searchFromHistorySubMenu; + } +} diff --git a/src/main/java/org/jabref/logic/autosaveandbackup/BackupManager.java b/src/main/java/org/jabref/logic/autosaveandbackup/BackupManager.java index c88930619dd..789671c0b14 100644 --- a/src/main/java/org/jabref/logic/autosaveandbackup/BackupManager.java +++ b/src/main/java/org/jabref/logic/autosaveandbackup/BackupManager.java @@ -64,7 +64,7 @@ public class BackupManager { // During a write, the less recent backup file is deleted private final Queue backupFilesQueue = new LinkedBlockingQueue<>(); - private boolean needsBackup = true; + private boolean needsBackup = false; BackupManager(BibDatabaseContext bibDatabaseContext, BibEntryTypesManager entryTypesManager, PreferencesService preferences) { this.bibDatabaseContext = bibDatabaseContext; @@ -87,7 +87,7 @@ static Path getBackupPathForNewBackup(Path originalPath) { * Determines the most recent existing backup file name */ static Optional 
getLatestBackupPath(Path originalPath) { - return BackupFileUtil.getPathOfLatestExisingBackupFile(originalPath, BackupFileType.BACKUP); + return BackupFileUtil.getPathOfLatestExistingBackupFile(originalPath, BackupFileType.BACKUP); } /** diff --git a/src/main/java/org/jabref/logic/importer/fileformat/BibtexParser.java b/src/main/java/org/jabref/logic/importer/fileformat/BibtexParser.java index 20da30e16ed..5d24472dba0 100644 --- a/src/main/java/org/jabref/logic/importer/fileformat/BibtexParser.java +++ b/src/main/java/org/jabref/logic/importer/fileformat/BibtexParser.java @@ -278,10 +278,10 @@ private void parseAndAddEntry(String type) { database.insertEntry(entry); } catch (IOException ex) { - // Trying to make the parser more robust. + // This makes the parser more robust: // If an exception is thrown when parsing an entry, drop the entry and try to resume parsing. - LOGGER.debug("Could not parse entry", ex); + LOGGER.warn("Could not parse entry", ex); parserResult.addWarning(Localization.lang("Error occurred when parsing entry") + ": '" + ex.getMessage() + "'. 
" + "\n\n" + Localization.lang("JabRef skipped the entry.")); } @@ -290,7 +290,7 @@ private void parseAndAddEntry(String type) { private void parseJabRefComment(Map meta) { StringBuilder buffer; try { - buffer = parseBracketedTextExactly(); + buffer = parseBracketedFieldContent(); } catch (IOException e) { // if we get an IO Exception here, then we have an unbracketed comment, // which means that we should just return and the comment will be picked up as arbitrary text @@ -500,6 +500,16 @@ private int peek() throws IOException { return character; } + private char[] peekTwoCharacters() throws IOException { + char character1 = (char) read(); + char character2 = (char) read(); + unread(character2); + unread(character1); + return new char[] { + character1, character2 + }; + } + private int read() throws IOException { int character = pushbackReader.read(); @@ -635,7 +645,7 @@ private String parseFieldContent(Field field) throws IOException { // Value is a string enclosed in brackets. There can be pairs // of brackets inside a field, so we need to count the // brackets to know when the string is finished. - StringBuilder text = parseBracketedTextExactly(); + StringBuilder text = parseBracketedFieldContent(); value.append(fieldContentFormatter.format(text, field)); } else if (Character.isDigit((char) character)) { // value is a number String number = parseTextToken(); @@ -668,7 +678,6 @@ private String parseTextToken() throws IOException { int character = read(); if (character == -1) { eof = true; - return token.toString(); } @@ -886,7 +895,11 @@ private boolean isClosingBracketNext() { } } - private StringBuilder parseBracketedTextExactly() throws IOException { + /** + * This is called if a field in the form of field = {content} is parsed. + * The global variable character contains {. 
+ */ + private StringBuilder parseBracketedFieldContent() throws IOException { StringBuilder value = new StringBuilder(); consume('{'); @@ -898,7 +911,35 @@ private StringBuilder parseBracketedTextExactly() throws IOException { while (true) { character = (char) read(); - boolean isClosingBracket = (character == '}') && (lastCharacter != '\\'); + boolean isClosingBracket = false; + if (character == '}') { + if (lastCharacter == '\\') { + // We hit `\}` + // It could be that a user has a backslash at the end of the entry, but intended to put a file path + // We want to be relaxed at that case + // First described at https://github.com/JabRef/jabref/issues/9668 + char[] nextTwoCharacters = peekTwoCharacters(); + // Check for "\},\n" - Example context: ` path = {c:\temp\},\n` + // On Windows, it could be "\},\r\n", thus we rely in OS.NEWLINE.charAt(0) (which returns '\r' or '\n'). + // In all cases, we should check for '\n' as the file could be encoded with Linux line endings on Windows. + if ((nextTwoCharacters[0] == ',') && ((nextTwoCharacters[1] == OS.NEWLINE.charAt(0)) || (nextTwoCharacters[1] == '\n'))) { + // We hit '\}\r` or `\}\n` + // Heuristics: Unwanted escaping of } + // + // Two consequences: + // + // 1. Keep `\` as read + // This is already done + // + // 2. 
Treat `}` as closing bracket + isClosingBracket = true; + } else { + isClosingBracket = false; + } + } else { + isClosingBracket = true; + } + } if (isClosingBracket && (brackets == 0)) { return value; diff --git a/src/main/java/org/jabref/logic/importer/fileformat/MedlineImporter.java b/src/main/java/org/jabref/logic/importer/fileformat/MedlineImporter.java index b9e9eac39d4..4522c34429f 100644 --- a/src/main/java/org/jabref/logic/importer/fileformat/MedlineImporter.java +++ b/src/main/java/org/jabref/logic/importer/fileformat/MedlineImporter.java @@ -4,7 +4,6 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; -import java.io.Serializable; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; @@ -15,62 +14,24 @@ import java.util.Objects; import java.util.Optional; +import javax.xml.XMLConstants; import javax.xml.stream.XMLInputFactory; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamReader; +import javax.xml.stream.events.XMLEvent; import org.jabref.logic.importer.Importer; import org.jabref.logic.importer.ParseException; import org.jabref.logic.importer.Parser; import org.jabref.logic.importer.ParserResult; -import org.jabref.logic.importer.fileformat.medline.Abstract; -import org.jabref.logic.importer.fileformat.medline.AbstractText; -import org.jabref.logic.importer.fileformat.medline.AffiliationInfo; import org.jabref.logic.importer.fileformat.medline.ArticleId; -import org.jabref.logic.importer.fileformat.medline.ArticleIdList; -import org.jabref.logic.importer.fileformat.medline.ArticleTitle; -import org.jabref.logic.importer.fileformat.medline.Author; -import org.jabref.logic.importer.fileformat.medline.AuthorList; -import org.jabref.logic.importer.fileformat.medline.Book; -import org.jabref.logic.importer.fileformat.medline.BookDocument; -import org.jabref.logic.importer.fileformat.medline.BookTitle; -import 
org.jabref.logic.importer.fileformat.medline.Chemical; -import org.jabref.logic.importer.fileformat.medline.ContributionDate; -import org.jabref.logic.importer.fileformat.medline.DateCompleted; -import org.jabref.logic.importer.fileformat.medline.DateCreated; -import org.jabref.logic.importer.fileformat.medline.DateRevised; -import org.jabref.logic.importer.fileformat.medline.ELocationID; -import org.jabref.logic.importer.fileformat.medline.GeneSymbolList; -import org.jabref.logic.importer.fileformat.medline.GeneralNote; -import org.jabref.logic.importer.fileformat.medline.ISSN; import org.jabref.logic.importer.fileformat.medline.Investigator; -import org.jabref.logic.importer.fileformat.medline.InvestigatorList; -import org.jabref.logic.importer.fileformat.medline.Journal; -import org.jabref.logic.importer.fileformat.medline.JournalIssue; -import org.jabref.logic.importer.fileformat.medline.Keyword; -import org.jabref.logic.importer.fileformat.medline.KeywordList; -import org.jabref.logic.importer.fileformat.medline.MedlineCitation; -import org.jabref.logic.importer.fileformat.medline.MedlineJournalInfo; import org.jabref.logic.importer.fileformat.medline.MeshHeading; -import org.jabref.logic.importer.fileformat.medline.MeshHeadingList; -import org.jabref.logic.importer.fileformat.medline.OtherID; -import org.jabref.logic.importer.fileformat.medline.Pagination; +import org.jabref.logic.importer.fileformat.medline.OtherId; import org.jabref.logic.importer.fileformat.medline.PersonalNameSubject; -import org.jabref.logic.importer.fileformat.medline.PersonalNameSubjectList; -import org.jabref.logic.importer.fileformat.medline.PubDate; -import org.jabref.logic.importer.fileformat.medline.PublicationType; -import org.jabref.logic.importer.fileformat.medline.Publisher; -import org.jabref.logic.importer.fileformat.medline.PubmedArticle; -import org.jabref.logic.importer.fileformat.medline.PubmedArticleSet; -import 
org.jabref.logic.importer.fileformat.medline.PubmedBookArticle; -import org.jabref.logic.importer.fileformat.medline.PubmedBookArticleSet; -import org.jabref.logic.importer.fileformat.medline.PubmedBookData; -import org.jabref.logic.importer.fileformat.medline.QualifierName; -import org.jabref.logic.importer.fileformat.medline.Section; -import org.jabref.logic.importer.fileformat.medline.Sections; -import org.jabref.logic.importer.fileformat.medline.Text; import org.jabref.logic.util.StandardFileType; import org.jabref.model.entry.BibEntry; +import org.jabref.model.entry.Date; import org.jabref.model.entry.Month; import org.jabref.model.entry.field.Field; import org.jabref.model.entry.field.FieldFactory; @@ -80,10 +41,6 @@ import org.jabref.model.strings.StringUtil; import com.google.common.base.Joiner; -import jakarta.xml.bind.JAXBContext; -import jakarta.xml.bind.JAXBElement; -import jakarta.xml.bind.JAXBException; -import jakarta.xml.bind.Unmarshaller; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -98,7 +55,6 @@ public class MedlineImporter extends Importer implements Parser { private static final String KEYWORD_SEPARATOR = "; "; private static final Locale ENGLISH = Locale.ENGLISH; - private Unmarshaller unmarshaller; private static String join(List list, String string) { return Joiner.on(string).join(list); @@ -140,130 +96,294 @@ public boolean isRecognizedFormat(BufferedReader reader) throws IOException { } @Override - public ParserResult importDatabase(BufferedReader reader) throws IOException { - Objects.requireNonNull(reader); + public ParserResult importDatabase(BufferedReader input) throws IOException { + Objects.requireNonNull(input); List bibItems = new ArrayList<>(); try { - Object unmarshalledObject = unmarshallRoot(reader); - - // check whether we have an article set, an article, a book article or a book article set - if (unmarshalledObject instanceof PubmedArticleSet) { - PubmedArticleSet articleSet = (PubmedArticleSet) 
unmarshalledObject; - for (Object article : articleSet.getPubmedArticleOrPubmedBookArticle()) { - if (article instanceof PubmedArticle) { - PubmedArticle currentArticle = (PubmedArticle) article; - parseArticle(currentArticle, bibItems); - } - if (article instanceof PubmedBookArticle) { - PubmedBookArticle currentArticle = (PubmedBookArticle) article; - parseBookArticle(currentArticle, bibItems); + XMLInputFactory xmlInputFactory = XMLInputFactory.newInstance(); + + // prevent xxe (https://rules.sonarsource.com/java/RSPEC-2755) + xmlInputFactory.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, ""); + // required for reading Unicode characters such as ö + xmlInputFactory.setProperty(XMLInputFactory.IS_COALESCING, true); + + XMLStreamReader reader = xmlInputFactory.createXMLStreamReader(input); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "PubmedArticle" -> { + parseArticle(reader, bibItems, elementName); + } + case "PubmedBookArticle" -> { + parseBookArticle(reader, bibItems, elementName); + } } } - } else if (unmarshalledObject instanceof PubmedArticle) { - PubmedArticle article = (PubmedArticle) unmarshalledObject; - parseArticle(article, bibItems); - } else if (unmarshalledObject instanceof PubmedBookArticle) { - PubmedBookArticle currentArticle = (PubmedBookArticle) unmarshalledObject; - parseBookArticle(currentArticle, bibItems); - } else { - PubmedBookArticleSet bookArticleSet = (PubmedBookArticleSet) unmarshalledObject; - for (PubmedBookArticle bookArticle : bookArticleSet.getPubmedBookArticle()) { - parseBookArticle(bookArticle, bibItems); - } } - } catch (JAXBException | XMLStreamException e) { + } catch (XMLStreamException e) { LOGGER.debug("could not parse document", e); return ParserResult.fromError(e); } + return new ParserResult(bibItems); } - private Object unmarshallRoot(BufferedReader reader) throws JAXBException, 
XMLStreamException { - initUmarshaller(); + private void parseBookArticle(XMLStreamReader reader, List bibItems, String startElement) + throws XMLStreamException { + Map fields = new HashMap<>(); - XMLInputFactory xmlInputFactory = XMLInputFactory.newFactory(); - XMLStreamReader xmlStreamReader = xmlInputFactory.createXMLStreamReader(reader); + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "BookDocument" -> { + parseBookDocument(reader, fields, elementName); + } + case "PublicationStatus" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, StandardField.PUBSTATE, reader.getText()); + } + } + } + } - // go to the root element - while (!xmlStreamReader.isStartElement()) { - xmlStreamReader.next(); + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; + } } - return unmarshaller.unmarshal(xmlStreamReader); - } + BibEntry entry = new BibEntry(StandardEntryType.Article); + entry.setField(fields); - private void initUmarshaller() throws JAXBException { - if (unmarshaller == null) { - // Lazy init because this is expensive - JAXBContext context = JAXBContext.newInstance("org.jabref.logic.importer.fileformat.medline"); - unmarshaller = context.createUnmarshaller(); - } + bibItems.add(entry); } - private void parseBookArticle(PubmedBookArticle currentArticle, List bibItems) { - Map fields = new HashMap<>(); - if (currentArticle.getBookDocument() != null) { - BookDocument bookDocument = currentArticle.getBookDocument(); - fields.put(StandardField.PMID, bookDocument.getPMID().getContent()); - if (bookDocument.getDateRevised() != null) { - DateRevised dateRevised = bookDocument.getDateRevised(); - addDateRevised(fields, dateRevised); - } - if (bookDocument.getAbstract() != null) { - Abstract abs = bookDocument.getAbstract(); - addAbstract(fields, abs); - } - if 
(bookDocument.getPagination() != null) { - Pagination pagination = bookDocument.getPagination(); - addPagination(fields, pagination); - } - if (bookDocument.getSections() != null) { - ArrayList result = new ArrayList<>(); - Sections sections = bookDocument.getSections(); - for (Section section : sections.getSection()) { - for (Serializable content : section.getSectionTitle().getContent()) { - if (content instanceof String) { - result.add((String) content); + private void parseBookDocument(XMLStreamReader reader, Map fields, String startElement) + throws XMLStreamException { + // multiple occurrences of the following fields can be present + List sectionTitleList = new ArrayList<>(); + List keywordList = new ArrayList<>(); + List publicationTypeList = new ArrayList<>(); + List articleTitleList = new ArrayList<>(); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "PMID" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + fields.put(StandardField.PMID, reader.getText()); } } + case "DateRevised", "ContributionDate" -> { + parseDate(reader, fields, elementName); + } + case "Abstract" -> { + addAbstract(reader, fields, elementName); + } + case "Pagination" -> { + addPagination(reader, fields, elementName); + } + case "Section" -> { + parseSections(reader, sectionTitleList); + } + case "Keyword" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + keywordList.add(reader.getText()); + } + } + case "PublicationType" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + publicationTypeList.add(reader.getText()); + } + } + case "ArticleTitle" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + articleTitleList.add(reader.getText()); + } + } + case "Book" -> { + parseBookInformation(reader, fields, elementName); + } } - fields.put(new UnknownField("sections"), join(result, "; ")); } - if (bookDocument.getKeywordList() != null) 
{ - addKeyWords(fields, bookDocument.getKeywordList()); - } - if (bookDocument.getContributionDate() != null) { - addContributionDate(fields, bookDocument.getContributionDate()); + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; } - if (bookDocument.getPublicationType() != null) { - List result = new ArrayList<>(); - for (PublicationType type : bookDocument.getPublicationType()) { - if (type.getContent() != null) { - result.add(type.getContent()); + } + + // populate multiple occurrence fields + if (!sectionTitleList.isEmpty()) { + fields.put(new UnknownField("sections"), join(sectionTitleList, "; ")); + } + addKeywords(fields, keywordList); + if (!publicationTypeList.isEmpty()) { + fields.put(new UnknownField("pubtype"), join(publicationTypeList, ", ")); + } + if (!articleTitleList.isEmpty()) { + fields.put(new UnknownField("article"), join(articleTitleList, ", ")); + } + } + + private void parseBookInformation(XMLStreamReader reader, Map fields, String startElement) + throws XMLStreamException { + List isbnList = new ArrayList<>(); + List titleList = new ArrayList<>(); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "PublisherName" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, StandardField.PUBLISHER, reader.getText()); + } + } + case "PublisherLocation" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, new UnknownField("publocation"), reader.getText()); + } + } + case "BookTitle" -> { + handleTextElement(reader, titleList, elementName); + } + case "PubDate" -> { + addPubDate(reader, fields, elementName); + } + case "AuthorList" -> { + handleAuthorList(reader, fields, elementName); + } + case "Volume" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, StandardField.VOLUME, 
reader.getText()); + } + } + case "Edition" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, StandardField.EDITION, reader.getText()); + } + } + case "Medium" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, new UnknownField("medium"), reader.getText()); + } + } + case "ReportNumber" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, new UnknownField("reportnumber"), reader.getText()); + } + } + case "ELocationID" -> { + String eidType = reader.getAttributeValue(null, "EIdType"); + reader.next(); + if (isCharacterXMLEvent(reader)) { + handleElocationId(fields, reader, eidType); + } + } + case "Isbn" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + isbnList.add(reader.getText()); + } } } - fields.put(new UnknownField("pubtype"), join(result, ", ")); } - if (bookDocument.getArticleTitle() != null) { - ArticleTitle articleTitle = bookDocument.getArticleTitle(); - ArrayList titles = new ArrayList<>(); - for (Serializable content : articleTitle.getContent()) { - if (content instanceof String) { - titles.add((String) content); + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; + } + } + + if (!isbnList.isEmpty()) { + fields.put(StandardField.ISBN, join(isbnList, ", ")); + } + + if (!titleList.isEmpty()) { + putIfValueNotNull(fields, StandardField.TITLE, join(titleList, " ")); + } + } + + private void handleElocationId(Map fields, XMLStreamReader reader, String eidType) { + if (eidType.equals("doi")) { + fields.put(StandardField.DOI, reader.getText()); + } + if (eidType.equals("pii")) { + fields.put(new UnknownField("pii"), reader.getText()); + } + } + + private void parseSections(XMLStreamReader reader, List sectionTitleList) throws XMLStreamException { + int sectionLevel = 0; + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = 
reader.getName().getLocalPart(); + switch (elementName) { + case "SectionTitle" -> { + reader.next(); + if (isCharacterXMLEvent(reader) && sectionLevel == 0) { + // we only collect SectionTitles from root level Sections + sectionTitleList.add(reader.getText()); + } + } + case "Section" -> { + sectionLevel++; } } - fields.put(new UnknownField("article"), join(titles, ", ")); } - if (bookDocument.getBook() != null) { - addBookInformation(fields, bookDocument.getBook()); + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals("Section")) { + if (sectionLevel == 0) { + break; + } else { + sectionLevel--; + } } } + } + + private void parseArticle(XMLStreamReader reader, List bibItems, String startElement) + throws XMLStreamException { + Map fields = new HashMap<>(); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "MedlineCitation" -> { + parseMedlineCitation(reader, fields, elementName); + } + case "PubmedData" -> { + parsePubmedData(reader, fields, elementName); + } + } + } - if (currentArticle.getPubmedBookData() != null) { - PubmedBookData bookData = currentArticle.getPubmedBookData(); - putIfValueNotNull(fields, StandardField.PUBSTATE, bookData.getPublicationStatus()); + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; + } } BibEntry entry = new BibEntry(StandardEntryType.Article); @@ -272,373 +392,752 @@ private void parseBookArticle(PubmedBookArticle currentArticle, List b bibItems.add(entry); } - private void addBookInformation(Map fields, Book book) { - if (book.getPublisher() != null) { - Publisher publisher = book.getPublisher(); - putIfValueNotNull(fields, new UnknownField("publocation"), publisher.getPublisherLocation()); - putStringFromSerializableList(fields, StandardField.PUBLISHER, publisher.getPublisherName().getContent()); - } - if (book.getBookTitle() != null) { - 
BookTitle title = book.getBookTitle(); - putStringFromSerializableList(fields, StandardField.TITLE, title.getContent()); - } - if (book.getPubDate() != null) { - addPubDate(fields, book.getPubDate()); - } - if (book.getAuthorList() != null) { - List authorLists = book.getAuthorList(); - // authorLists size should be one - if (authorLists.size() == 1) { - for (AuthorList authorList : authorLists) { - handleAuthors(fields, authorList); + private void parsePubmedData(XMLStreamReader reader, Map fields, String startElement) + throws XMLStreamException { + String publicationStatus = ""; + List articleIdList = new ArrayList<>(); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "PublicationStatus" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + publicationStatus = reader.getText(); + } + } + case "ArticleId" -> { + String idType = reader.getAttributeValue(null, "IdType"); + reader.next(); + if (isCharacterXMLEvent(reader)) { + articleIdList.add(new ArticleId(idType, reader.getText())); + } + } } - } else { - LOGGER.info(String.format("Size of authorlist was %s", authorLists.size())); } - } - putIfValueNotNull(fields, StandardField.VOLUME, book.getVolume()); - putIfValueNotNull(fields, StandardField.EDITION, book.getEdition()); - putIfValueNotNull(fields, new UnknownField("medium"), book.getMedium()); - putIfValueNotNull(fields, new UnknownField("reportnumber"), book.getReportNumber()); - - if (book.getELocationID() != null) { - for (ELocationID id : book.getELocationID()) { - addElocationID(fields, id); + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; } } - if (book.getIsbn() != null) { - fields.put(StandardField.ISBN, join(book.getIsbn(), ", ")); + + if (fields.get(new UnknownField("revised")) != null) { + putIfValueNotNull(fields, StandardField.PUBSTATE, publicationStatus); + if 
(!articleIdList.isEmpty()) { + addArticleIdList(fields, articleIdList); + } } } - private void putStringFromSerializableList(Map fields, Field field, List contentList) { - StringBuilder result = new StringBuilder(); - for (Serializable content : contentList) { - if (content instanceof String) { - result.append((String) content); + private void parseMedlineCitation(XMLStreamReader reader, Map fields, String startElement) + throws XMLStreamException { + // multiple occurrences of the following fields can be present + List citationSubsets = new ArrayList<>(); + List meshHeadingList = new ArrayList<>(); + List personalNameSubjectList = new ArrayList<>(); + List otherIdList = new ArrayList<>(); + List keywordList = new ArrayList<>(); + List spaceFlightMissionList = new ArrayList<>(); + List investigatorList = new ArrayList<>(); + List generalNoteList = new ArrayList<>(); + + String status = reader.getAttributeValue(null, "Status"); + String owner = reader.getAttributeValue(null, "Owner"); + int latestVersion = 0; + fields.put(new UnknownField("status"), status); + fields.put(StandardField.OWNER, owner); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "DateCreated", "DateCompleted", "DateRevised" -> { + parseDate(reader, fields, elementName); + } + case "Article" -> { + parseArticleInformation(reader, fields); + } + case "PMID" -> { + String versionStr = reader.getAttributeValue(null, "Version"); + reader.next(); + if (versionStr != null) { + int version = Integer.parseInt(versionStr); + if (isCharacterXMLEvent(reader) && version > latestVersion) { + latestVersion = version; + fields.put(StandardField.PMID, reader.getText()); + } + } + } + case "MedlineJournalInfo" -> { + parseMedlineJournalInfo(reader, fields, elementName); + } + case "ChemicalList" -> { + parseChemicalList(reader, fields, elementName); + } + case "CitationSubset" -> { + 
reader.next(); + if (isCharacterXMLEvent(reader)) { + citationSubsets.add(reader.getText()); + } + } + case "GeneSymbolList" -> { + parseGeneSymbolList(reader, fields, elementName); + } + case "MeshHeading" -> { + parseMeshHeading(reader, meshHeadingList, elementName); + } + case "NumberOfReferences" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, new UnknownField("references"), reader.getText()); + } + } + case "PersonalNameSubject" -> { + parsePersonalNameSubject(reader, personalNameSubjectList, elementName); + } + case "OtherID" -> { + String otherIdSource = reader.getAttributeValue(null, "Source"); + reader.next(); + if (isCharacterXMLEvent(reader)) { + String content = reader.getText(); + otherIdList.add(new OtherId(otherIdSource, content)); + } + } + case "Keyword" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + keywordList.add(reader.getText()); + } + } + case "SpaceFlightMission" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + spaceFlightMissionList.add(reader.getText()); + } + } + case "Investigator" -> { + parseInvestigator(reader, investigatorList, elementName); + } + case "GeneralNote" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + generalNoteList.add(reader.getText()); + } + } + } + } + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; } } - if (result.length() > 0) { - fields.put(field, result.toString()); + + // populate multiple occurrence fields + if (!citationSubsets.isEmpty()) { + fields.put(new UnknownField("citation-subset"), join(citationSubsets, ", ")); + } + addMeshHeading(fields, meshHeadingList); + addPersonalNames(fields, personalNameSubjectList); + addOtherId(fields, otherIdList); + addKeywords(fields, keywordList); + if (!spaceFlightMissionList.isEmpty()) { + fields.put(new UnknownField("space-flight-mission"), join(spaceFlightMissionList, ", ")); } + addInvestigators(fields, investigatorList); + addNotes(fields, 
generalNoteList); } - private void addContributionDate(Map fields, ContributionDate contributionDate) { - if ((contributionDate.getDay() != null) && (contributionDate.getMonth() != null) - && (contributionDate.getYear() != null)) { - String result = convertToDateFormat(contributionDate.getYear(), contributionDate.getMonth(), - contributionDate.getDay()); - fields.put(new UnknownField("contribution"), result); + private void parseInvestigator(XMLStreamReader reader, List investigatorList, String startElement) + throws XMLStreamException { + String lastName = ""; + String foreName = ""; + List affiliationList = new ArrayList<>(); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "LastName" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + lastName = reader.getText(); + } + } + case "ForeName" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + foreName = reader.getText(); + } + } + case "Affiliation" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + affiliationList.add(reader.getText()); + } + } + } + } + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; + } } - } - private String convertToDateFormat(String year, String month, String day) { - return String.format("%s-%s-%s", year, month, day); + investigatorList.add(new Investigator(lastName, foreName, affiliationList)); } - private void parseArticle(PubmedArticle article, List bibItems) { - Map fields = new HashMap<>(); + private void parsePersonalNameSubject(XMLStreamReader reader, List personalNameSubjectList, String startElement) + throws XMLStreamException { + String lastName = ""; + String foreName = ""; + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "LastName" -> { + reader.next(); + if 
(isCharacterXMLEvent(reader)) { + lastName = reader.getText(); + } + } + case "ForeName" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + foreName = reader.getText(); + } + } + } + } + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; + } + } - if (article.getPubmedData() != null) { - if (article.getMedlineCitation().getDateRevised() != null) { - DateRevised dateRevised = article.getMedlineCitation().getDateRevised(); - addDateRevised(fields, dateRevised); - putIfValueNotNull(fields, StandardField.PUBSTATE, article.getPubmedData().getPublicationStatus()); - if (article.getPubmedData().getArticleIdList() != null) { - ArticleIdList articleIdList = article.getPubmedData().getArticleIdList(); - addArticleIdList(fields, articleIdList); + personalNameSubjectList.add(new PersonalNameSubject(lastName, foreName)); + } + + private void parseMeshHeading(XMLStreamReader reader, List meshHeadingList, String startElement) + throws XMLStreamException { + String descriptorName = ""; + List qualifierNames = new ArrayList<>(); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "DescriptorName" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + descriptorName = reader.getText(); + } + } + case "QualifierName" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + qualifierNames.add(reader.getText()); + } + } } } + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; + } } - if (article.getMedlineCitation() != null) { - MedlineCitation medlineCitation = article.getMedlineCitation(); - fields.put(new UnknownField("status"), medlineCitation.getStatus()); - DateCreated dateCreated = medlineCitation.getDateCreated(); - if (medlineCitation.getDateCreated() != null) { - fields.put(new UnknownField("created"), - convertToDateFormat(dateCreated.getYear(), 
dateCreated.getMonth(), dateCreated.getDay())); + meshHeadingList.add(new MeshHeading(descriptorName, qualifierNames)); + } + + private void parseGeneSymbolList(XMLStreamReader reader, Map fields, String startElement) + throws XMLStreamException { + List geneSymbols = new ArrayList<>(); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + if (elementName.equals("GeneSymbol")) { + reader.next(); + if (isCharacterXMLEvent(reader)) { + geneSymbols.add(reader.getText()); + } + } } - fields.put(new UnknownField("pubmodel"), medlineCitation.getArticle().getPubModel()); - if (medlineCitation.getDateCompleted() != null) { - DateCompleted dateCompleted = medlineCitation.getDateCompleted(); - fields.put(new UnknownField("completed"), - convertToDateFormat(dateCompleted.getYear(), dateCompleted.getMonth(), dateCompleted.getDay())); + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; } + } - fields.put(StandardField.PMID, medlineCitation.getPMID().getContent()); - fields.put(StandardField.OWNER, medlineCitation.getOwner()); + if (!geneSymbols.isEmpty()) { + fields.put(new UnknownField("gene-symbols"), join(geneSymbols, ", ")); + } + } - addArticleInformation(fields, medlineCitation.getArticle().getContent()); + private void parseChemicalList(XMLStreamReader reader, Map fields, String startElement) + throws XMLStreamException { + List chemicalNames = new ArrayList<>(); - MedlineJournalInfo medlineJournalInfo = medlineCitation.getMedlineJournalInfo(); - putIfValueNotNull(fields, new UnknownField("country"), medlineJournalInfo.getCountry()); - putIfValueNotNull(fields, new UnknownField("journal-abbreviation"), medlineJournalInfo.getMedlineTA()); - putIfValueNotNull(fields, new UnknownField("nlm-id"), medlineJournalInfo.getNlmUniqueID()); - putIfValueNotNull(fields, new UnknownField("issn-linking"), medlineJournalInfo.getISSNLinking()); - if 
(medlineCitation.getChemicalList() != null) { - if (medlineCitation.getChemicalList().getChemical() != null) { - addChemicals(fields, medlineCitation.getChemicalList().getChemical()); + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + if (elementName.equals("NameOfSubstance")) { + reader.next(); + if (isCharacterXMLEvent(reader)) { + chemicalNames.add(reader.getText()); + } } } - if (medlineCitation.getCitationSubset() != null) { - fields.put(new UnknownField("citation-subset"), join(medlineCitation.getCitationSubset(), ", ")); + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; } - if (medlineCitation.getGeneSymbolList() != null) { - addGeneSymbols(fields, medlineCitation.getGeneSymbolList()); + } + + fields.put(new UnknownField("chemicals"), join(chemicalNames, ", ")); + } + + private void parseMedlineJournalInfo(XMLStreamReader reader, Map fields, String startElement) + throws XMLStreamException { + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "Country" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, new UnknownField("country"), reader.getText()); + } + } + case "MedlineTA" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, new UnknownField("journal-abbreviation"), reader.getText()); + } + } + case "NlmUniqueID" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, new UnknownField("nlm-id"), reader.getText()); + } + } + case "ISSNLinking" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, new UnknownField("issn-linking"), reader.getText()); + } + } + } } - if (medlineCitation.getMeshHeadingList() != null) { - addMeashHeading(fields, medlineCitation.getMeshHeadingList()); + 
+ if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; } - putIfValueNotNull(fields, new UnknownField("references"), medlineCitation.getNumberOfReferences()); - if (medlineCitation.getPersonalNameSubjectList() != null) { - addPersonalNames(fields, medlineCitation.getPersonalNameSubjectList()); + } + } + + private void parseArticleInformation(XMLStreamReader reader, Map fields) throws XMLStreamException { + List titleList = new ArrayList<>(); + String pubmodel = reader.getAttributeValue(null, "PubModel"); + fields.put(new UnknownField("pubmodel"), pubmodel); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "Journal" -> { + parseJournal(reader, fields); + } + case "ArticleTitle" -> { + handleTextElement(reader, titleList, elementName); + } + case "Pagination" -> { + addPagination(reader, fields, elementName); + } + case "ELocationID" -> { + String eidType = reader.getAttributeValue(null, "EIdType"); + String validYN = reader.getAttributeValue(null, "ValidYN"); + reader.next(); + if (isCharacterXMLEvent(reader) && "Y".equals(validYN)) { + handleElocationId(fields, reader, eidType); + } + } + case "Abstract" -> { + addAbstract(reader, fields, elementName); + } + case "AuthorList" -> { + handleAuthorList(reader, fields, elementName); + } + } } - if (medlineCitation.getOtherID() != null) { - addOtherId(fields, medlineCitation.getOtherID()); + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals("Article")) { + break; } - if (medlineCitation.getKeywordList() != null) { - addKeyWords(fields, medlineCitation.getKeywordList()); + } + + if (!titleList.isEmpty()) { + fields.put(StandardField.TITLE, StringUtil.stripBrackets(join(titleList, " "))); + } + } + + private void parseJournal(XMLStreamReader reader, Map fields) throws XMLStreamException { + while (reader.hasNext()) { + reader.next(); + if 
(isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "Title" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, StandardField.JOURNAL, reader.getText()); + } + } + case "ISSN" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, StandardField.ISSN, reader.getText()); + } + } + case "Volume" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, StandardField.VOLUME, reader.getText()); + } + } + case "Issue" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, StandardField.ISSUE, reader.getText()); + } + } + case "PubDate" -> { + addPubDate(reader, fields, elementName); + } + } } - if (medlineCitation.getSpaceFlightMission() != null) { - fields.put(new UnknownField("space-flight-mission"), join(medlineCitation.getSpaceFlightMission(), ", ")); + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals("Journal")) { + break; } - if (medlineCitation.getInvestigatorList() != null) { - addInvestigators(fields, medlineCitation.getInvestigatorList()); + } + } + + private void parseDate(XMLStreamReader reader, Map fields, String startElement) + throws XMLStreamException { + Optional year = Optional.empty(); + Optional month = Optional.empty(); + Optional day = Optional.empty(); + + // mapping from date XML element to field name + Map dateFieldMap = Map.of( + "DateCreated", "created", + "DateCompleted", "completed", + "DateRevised", "revised", + "ContributionDate", "contribution", + "PubDate", "" + ); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "Year" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + year = Optional.of(reader.getText()); + } + } + case "Month" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + month = 
Optional.of(reader.getText()); + } + } + case "Day" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + day = Optional.of(reader.getText()); + } + } + } } - if (medlineCitation.getGeneralNote() != null) { - addNotes(fields, medlineCitation.getGeneralNote()); + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; } } - BibEntry entry = new BibEntry(StandardEntryType.Article); - entry.setField(fields); - - bibItems.add(entry); + Optional date = Date.parse(year, month, day); + date.ifPresent(dateValue -> + fields.put(new UnknownField(dateFieldMap.get(startElement)), dateValue.getNormalized())); } - private void addArticleIdList(Map fields, ArticleIdList articleIdList) { - for (ArticleId id : articleIdList.getArticleId()) { - if (id.getIdType() != null) { - if ("pubmed".equals(id.getIdType())) { - fields.put(StandardField.PMID, id.getContent()); + private void addArticleIdList(Map fields, List articleIdList) { + for (ArticleId id : articleIdList) { + if (!id.idType().isBlank()) { + if ("pubmed".equals(id.idType())) { + fields.computeIfAbsent(StandardField.PMID, k -> id.content()); } else { - fields.put(FieldFactory.parseField(StandardEntryType.Article, id.getIdType()), id.getContent()); + fields.computeIfAbsent(FieldFactory.parseField(StandardEntryType.Article, id.idType()), k -> id.content()); } } } } - private void addNotes(Map fields, List generalNote) { + private void addNotes(Map fields, List generalNoteList) { List notes = new ArrayList<>(); - for (GeneralNote note : generalNote) { - if (note != null) { - notes.add(note.getContent()); + + for (String note : generalNoteList) { + if (!note.isBlank()) { + notes.add(note); } } - fields.put(StandardField.NOTE, join(notes, ", ")); + + if (!notes.isEmpty()) { + fields.put(StandardField.NOTE, join(notes, ", ")); + } } - private void addInvestigators(Map fields, InvestigatorList investigatorList) { + private void addInvestigators(Map fields, List investigatorList) { List 
investigatorNames = new ArrayList<>(); List affiliationInfos = new ArrayList<>(); - String name; + // add the investigators like the authors - if (investigatorList.getInvestigator() != null) { - List investigators = investigatorList.getInvestigator(); - for (Investigator investigator : investigators) { - name = investigator.getLastName(); - if (investigator.getForeName() != null) { - name += ", " + investigator.getForeName(); + if (!investigatorList.isEmpty()) { + for (Investigator investigator : investigatorList) { + StringBuilder result = new StringBuilder(investigator.lastName()); + if (!investigator.foreName().isBlank()) { + result.append(", ").append(investigator.foreName()); } - investigatorNames.add(name); + investigatorNames.add(result.toString()); // now add the affiliation info - if (investigator.getAffiliationInfo() != null) { - for (AffiliationInfo info : investigator.getAffiliationInfo()) { - for (Serializable affiliation : info.getAffiliation().getContent()) { - if (affiliation instanceof String) { - affiliationInfos.add((String) affiliation); - } - } - } - fields.put(new UnknownField("affiliation"), join(affiliationInfos, ", ")); + if (!investigator.affiliationList().isEmpty()) { + affiliationInfos.addAll(investigator.affiliationList()); } } + + if (!affiliationInfos.isEmpty()) { + fields.put(new UnknownField("affiliation"), join(affiliationInfos, ", ")); + } + fields.put(new UnknownField("investigator"), join(investigatorNames, " and ")); } } - private void addKeyWords(Map fields, List allKeywordLists) { - List keywordStrings = new ArrayList<>(); - // add keywords to the list - for (KeywordList keywordList : allKeywordLists) { - for (Keyword keyword : keywordList.getKeyword()) { - for (Serializable content : keyword.getContent()) { - if (content instanceof String) { - keywordStrings.add((String) content); - } - } - } - } - // Check whether MeshHeadingList exist or not + private void addKeywords(Map fields, List keywordList) { + // Check whether 
MeshHeadingList exists or not if (fields.get(StandardField.KEYWORDS) == null) { - fields.put(StandardField.KEYWORDS, join(keywordStrings, KEYWORD_SEPARATOR)); + fields.put(StandardField.KEYWORDS, join(keywordList, KEYWORD_SEPARATOR)); } else { - if (keywordStrings.size() > 0) { + if (!keywordList.isEmpty()) { // if it exists, combine the MeshHeading with the keywords - String result = join(keywordStrings, "; "); + String result = join(keywordList, "; "); result = fields.get(StandardField.KEYWORDS) + KEYWORD_SEPARATOR + result; fields.put(StandardField.KEYWORDS, result); } } } - private void addOtherId(Map fields, List otherID) { - for (OtherID id : otherID) { - if ((id.getSource() != null) && (id.getContent() != null)) { - fields.put(FieldFactory.parseField(StandardEntryType.Article, id.getSource()), id.getContent()); + private void addOtherId(Map fields, List otherIdList) { + for (OtherId id : otherIdList) { + if (!id.source().isBlank() && !id.content().isBlank()) { + fields.put(FieldFactory.parseField(StandardEntryType.Article, id.source()), id.content()); } } } - private void addPersonalNames(Map fields, PersonalNameSubjectList personalNameSubjectList) { + private void addPersonalNames(Map fields, List personalNameSubjectList) { if (fields.get(StandardField.AUTHOR) == null) { // if no authors appear, then add the personal names as authors List personalNames = new ArrayList<>(); - if (personalNameSubjectList.getPersonalNameSubject() != null) { - List personalNameSubject = personalNameSubjectList.getPersonalNameSubject(); - for (PersonalNameSubject personalName : personalNameSubject) { - String name = personalName.getLastName(); - if (personalName.getForeName() != null) { - name += ", " + personalName.getForeName(); - } - personalNames.add(name); + + if (!personalNameSubjectList.isEmpty()) { + for (PersonalNameSubject personalNameSubject : personalNameSubjectList) { + StringBuilder result = new StringBuilder(personalNameSubject.lastName()); + if 
(!personalNameSubject.foreName().isBlank()) { + result.append(", ").append(personalNameSubject.foreName()); + } + personalNames.add(result.toString()); } + fields.put(StandardField.AUTHOR, join(personalNames, " and ")); } } } - private void addMeashHeading(Map fields, MeshHeadingList meshHeadingList) { - ArrayList keywords = new ArrayList<>(); - for (MeshHeading keyword : meshHeadingList.getMeshHeading()) { - StringBuilder result = new StringBuilder(keyword.getDescriptorName().getContent()); - if (keyword.getQualifierName() != null) { - for (QualifierName qualifier : keyword.getQualifierName()) { - result.append(", ").append(qualifier.getContent()); + private void addMeshHeading(Map fields, List meshHeadingList) { + List keywords = new ArrayList<>(); + + if (!meshHeadingList.isEmpty()) { + for (MeshHeading meshHeading : meshHeadingList) { + StringBuilder result = new StringBuilder(meshHeading.descriptorName()); + if (meshHeading.qualifierNames() != null) { + for (String qualifierName : meshHeading.qualifierNames()) { + result.append(", ").append(qualifierName); + } } + keywords.add(result.toString()); } - keywords.add(result.toString()); + + fields.put(StandardField.KEYWORDS, join(keywords, KEYWORD_SEPARATOR)); } - fields.put(StandardField.KEYWORDS, join(keywords, KEYWORD_SEPARATOR)); } - private void addGeneSymbols(Map fields, GeneSymbolList geneSymbolList) { - List geneSymbols = geneSymbolList.getGeneSymbol(); - fields.put(new UnknownField("gene-symbols"), join(geneSymbols, ", ")); - } + private void addPubDate(XMLStreamReader reader, Map fields, String startElement) throws XMLStreamException { + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "MedlineDate" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + fields.put(StandardField.YEAR, extractYear(reader.getText())); + } + } + case "Year" -> { + reader.next(); + if 
(isCharacterXMLEvent(reader)) { + fields.put(StandardField.YEAR, reader.getText()); + } + } + case "Month" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + Optional month = Month.parse(reader.getText()); + month.ifPresent(monthValue -> fields.put(StandardField.MONTH, monthValue.getJabRefFormat())); + } + } + case "Season" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + fields.put(new UnknownField("season"), reader.getText()); + } + } + } + } - private void addChemicals(Map fields, List chemicals) { - List chemicalNames = new ArrayList<>(); - for (Chemical chemical : chemicals) { - if (chemical != null) { - chemicalNames.add(chemical.getNameOfSubstance().getContent()); + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; } } - fields.put(new UnknownField("chemicals"), join(chemicalNames, ", ")); } - private void addArticleInformation(Map fields, List content) { - for (Object object : content) { - if (object instanceof Journal) { - Journal journal = (Journal) object; - putIfValueNotNull(fields, StandardField.JOURNAL, journal.getTitle()); - - ISSN issn = journal.getISSN(); - if (issn != null) { - putIfValueNotNull(fields, StandardField.ISSN, issn.getContent()); + private void addAbstract(XMLStreamReader reader, Map fields, String startElement) + throws XMLStreamException { + List abstractTextList = new ArrayList<>(); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "CopyrightInformation" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, new UnknownField("copyright"), reader.getText()); + } + } + case "AbstractText" -> { + handleTextElement(reader, abstractTextList, elementName); + } } + } - JournalIssue journalIssue = journal.getJournalIssue(); - putIfValueNotNull(fields, StandardField.VOLUME, journalIssue.getVolume()); - putIfValueNotNull(fields, 
StandardField.ISSUE, journalIssue.getIssue()); - - addPubDate(fields, journalIssue.getPubDate()); - } else if (object instanceof ArticleTitle) { - ArticleTitle articleTitle = (ArticleTitle) object; - fields.put(StandardField.TITLE, StringUtil.stripBrackets(articleTitle.getContent().toString())); - } else if (object instanceof Pagination) { - Pagination pagination = (Pagination) object; - addPagination(fields, pagination); - } else if (object instanceof ELocationID) { - ELocationID eLocationID = (ELocationID) object; - addElocationID(fields, eLocationID); - } else if (object instanceof Abstract) { - Abstract abs = (Abstract) object; - addAbstract(fields, abs); - } else if (object instanceof AuthorList) { - AuthorList authors = (AuthorList) object; - handleAuthors(fields, authors); + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; } } - } - private void addElocationID(Map fields, ELocationID eLocationID) { - if (eLocationID.getEIdType().equals("doi")) { - fields.put(StandardField.DOI, eLocationID.getContent()); - } - if (eLocationID.getEIdType().equals("pii")) { - fields.put(new UnknownField("pii"), eLocationID.getContent()); + if (!abstractTextList.isEmpty()) { + fields.put(StandardField.ABSTRACT, join(abstractTextList, " ")); } } - private void addPubDate(Map fields, PubDate pubDate) { - if (pubDate.getYear() == null) { - // if year of the pubdate is null, the medlineDate shouldn't be null - fields.put(StandardField.YEAR, extractYear(pubDate.getMedlineDate())); - } else { - fields.put(StandardField.YEAR, pubDate.getYear()); - if (pubDate.getMonth() != null) { - Optional month = Month.parse(pubDate.getMonth()); - if (month.isPresent()) { - fields.put(StandardField.MONTH, month.get().getJabRefFormat()); + /** + * Handles text entities that can have inner tags such as {@literal <}i{@literal >}, {@literal <}b{@literal >} etc. + * We ignore the tags and return only the characters present in the enclosing parent element. 
+ * + */ + private void handleTextElement(XMLStreamReader reader, List textList, String startElement) + throws XMLStreamException { + StringBuilder result = new StringBuilder(); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "sup", "sub" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + result.append("(").append(reader.getText()).append(")"); + } + } } - } else if (pubDate.getSeason() != null) { - fields.put(new UnknownField("season"), pubDate.getSeason()); + } else if (isCharacterXMLEvent(reader)) { + result.append(reader.getText().trim()).append(" "); } - } - } - private void addAbstract(Map fields, Abstract abs) { - putIfValueNotNull(fields, new UnknownField("copyright"), abs.getCopyrightInformation()); - List abstractText = new ArrayList<>(); - for (AbstractText text : abs.getAbstractText()) { - for (Serializable textContent : text.getContent()) { - if (textContent instanceof String) { - abstractText.add((String) textContent); - } + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; } } - fields.put(StandardField.ABSTRACT, join(abstractText, " ")); + + textList.add(result.toString().trim()); } - private void addPagination(Map fields, Pagination pagination) { + private void addPagination(XMLStreamReader reader, Map fields, String startElement) + throws XMLStreamException { String startPage = ""; String endPage = ""; - for (JAXBElement element : pagination.getContent()) { - if ("MedlinePgn".equals(element.getName().getLocalPart())) { - putIfValueNotNull(fields, StandardField.PAGES, fixPageRange(element.getValue())); - } else if ("StartPage".equals(element.getName().getLocalPart())) { - // it could happen, that the article has only a start page - startPage = element.getValue() + endPage; - putIfValueNotNull(fields, StandardField.PAGES, startPage); - } else if 
("EndPage".equals(element.getName().getLocalPart())) { - endPage = element.getValue(); - // but it should not happen, that a endpage appears without startpage - fields.put(StandardField.PAGES, fixPageRange(startPage + "-" + endPage)); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "MedlinePgn" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + putIfValueNotNull(fields, StandardField.PAGES, fixPageRange(reader.getText())); + } + } + case "StartPage" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + // it could happen, that the article has only a start page + startPage = reader.getText() + endPage; + putIfValueNotNull(fields, StandardField.PAGES, startPage); + } + } + case "EndPage" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + endPage = reader.getText(); + // but it should not happen, that a endpage appears without startpage + fields.put(StandardField.PAGES, fixPageRange(startPage + "-" + endPage)); + } + } + } + } + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; } } } @@ -648,31 +1147,68 @@ private String extractYear(String medlineDate) { return medlineDate.substring(0, 4); } - private void handleAuthors(Map fields, AuthorList authors) { + private void handleAuthorList(XMLStreamReader reader, Map fields, String startElement) throws XMLStreamException { List authorNames = new ArrayList<>(); - for (Author author : authors.getAuthor()) { - if (author.getCollectiveName() != null) { - Text collectiveNames = author.getCollectiveName(); - for (Serializable content : collectiveNames.getContent()) { - if (content instanceof String) { - authorNames.add((String) content); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "Author" -> { + parseAuthor(reader, 
authorNames); } } - } else { - String authorName = author.getLastName(); - if (author.getForeName() != null) { - authorName += ", " + author.getForeName(); - } - authorNames.add(authorName); + } + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { + break; } } + fields.put(StandardField.AUTHOR, join(authorNames, " and ")); } - private void addDateRevised(Map fields, DateRevised dateRevised) { - if ((dateRevised.getDay() != null) && (dateRevised.getMonth() != null) && (dateRevised.getYear() != null)) { - fields.put(new UnknownField("revised"), - convertToDateFormat(dateRevised.getYear(), dateRevised.getMonth(), dateRevised.getDay())); + private void parseAuthor(XMLStreamReader reader, List authorNames) throws XMLStreamException { + StringBuilder authorName = new StringBuilder(); + List collectiveNames = new ArrayList<>(); + + while (reader.hasNext()) { + reader.next(); + if (isStartXMLEvent(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "CollectiveName" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + collectiveNames.add(reader.getText()); + } + } + case "LastName" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + authorName = new StringBuilder(reader.getText()); + } + } + case "ForeName" -> { + reader.next(); + if (isCharacterXMLEvent(reader)) { + authorName.append(", ").append(reader.getText()); + } + } + } + } + + if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals("Author")) { + break; + } + } + + if (collectiveNames.size() > 0) { + authorNames.addAll(collectiveNames); + } + if (!authorName.toString().isBlank()) { + authorNames.add(authorName.toString()); } } @@ -683,7 +1219,8 @@ private void putIfValueNotNull(Map fields, Field field, String va } /** - * Convert medline page ranges from short form to full form. Medline reports page ranges in a shorthand format. 
The last page is reported using only the digits which differ from the first page. i.e. 12345-51 refers to the actual range 12345-12351 + * Convert medline page ranges from short form to full form. Medline reports page ranges in a shorthand format. + * The last page is reported using only the digits which differ from the first page. i.e. 12345-51 refers to the actual range 12345-12351 */ private String fixPageRange(String pageRange) { int minusPos = pageRange.indexOf('-'); @@ -700,6 +1237,18 @@ private String fixPageRange(String pageRange) { return startPage + "--" + endPage; } + private boolean isCharacterXMLEvent(XMLStreamReader reader) { + return reader.getEventType() == XMLEvent.CHARACTERS; + } + + private boolean isStartXMLEvent(XMLStreamReader reader) { + return reader.getEventType() == XMLEvent.START_ELEMENT; + } + + private boolean isEndXMLEvent(XMLStreamReader reader) { + return reader.getEventType() == XMLEvent.END_ELEMENT; + } + @Override public List parseEntries(InputStream inputStream) throws ParseException { try { diff --git a/src/main/java/org/jabref/logic/importer/fileformat/medline/ArticleId.java b/src/main/java/org/jabref/logic/importer/fileformat/medline/ArticleId.java new file mode 100644 index 00000000000..3a8be1b9b63 --- /dev/null +++ b/src/main/java/org/jabref/logic/importer/fileformat/medline/ArticleId.java @@ -0,0 +1,7 @@ +package org.jabref.logic.importer.fileformat.medline; + +public record ArticleId( + String idType, + String content +) { +} diff --git a/src/main/java/org/jabref/logic/importer/fileformat/medline/Investigator.java b/src/main/java/org/jabref/logic/importer/fileformat/medline/Investigator.java new file mode 100644 index 00000000000..64ea31e6206 --- /dev/null +++ b/src/main/java/org/jabref/logic/importer/fileformat/medline/Investigator.java @@ -0,0 +1,10 @@ +package org.jabref.logic.importer.fileformat.medline; + +import java.util.List; + +public record Investigator( + String lastName, + String foreName, + List 
affiliationList +) { +} diff --git a/src/main/java/org/jabref/logic/importer/fileformat/medline/MeshHeading.java b/src/main/java/org/jabref/logic/importer/fileformat/medline/MeshHeading.java new file mode 100644 index 00000000000..a78f65f9727 --- /dev/null +++ b/src/main/java/org/jabref/logic/importer/fileformat/medline/MeshHeading.java @@ -0,0 +1,9 @@ +package org.jabref.logic.importer.fileformat.medline; + +import java.util.List; + +public record MeshHeading( + String descriptorName, + List qualifierNames +) { +} diff --git a/src/main/java/org/jabref/logic/importer/fileformat/medline/OtherId.java b/src/main/java/org/jabref/logic/importer/fileformat/medline/OtherId.java new file mode 100644 index 00000000000..4429436c332 --- /dev/null +++ b/src/main/java/org/jabref/logic/importer/fileformat/medline/OtherId.java @@ -0,0 +1,7 @@ +package org.jabref.logic.importer.fileformat.medline; + +public record OtherId( + String source, + String content +) { +} diff --git a/src/main/java/org/jabref/logic/importer/fileformat/medline/PersonalNameSubject.java b/src/main/java/org/jabref/logic/importer/fileformat/medline/PersonalNameSubject.java new file mode 100644 index 00000000000..bda9c6aefff --- /dev/null +++ b/src/main/java/org/jabref/logic/importer/fileformat/medline/PersonalNameSubject.java @@ -0,0 +1,7 @@ +package org.jabref.logic.importer.fileformat.medline; + +public record PersonalNameSubject( + String lastName, + String foreName +) { +} diff --git a/src/main/java/org/jabref/logic/pdf/search/indexing/PdfIndexer.java b/src/main/java/org/jabref/logic/pdf/search/indexing/PdfIndexer.java index 975cf83b1ec..4fea4b638a5 100644 --- a/src/main/java/org/jabref/logic/pdf/search/indexing/PdfIndexer.java +++ b/src/main/java/org/jabref/logic/pdf/search/indexing/PdfIndexer.java @@ -188,7 +188,7 @@ private void writeToIndex(BibEntry entry, LinkedFile linkedFile) { } Optional resolvedPath = linkedFile.findIn(databaseContext, filePreferences); if (resolvedPath.isEmpty()) { - 
LOGGER.warn("Could not find {}", linkedFile.getLink()); + LOGGER.debug("Could not find {}", linkedFile.getLink()); return; } try { diff --git a/src/main/java/org/jabref/logic/util/OS.java b/src/main/java/org/jabref/logic/util/OS.java index fabbadbd2c9..52a29bcdedc 100644 --- a/src/main/java/org/jabref/logic/util/OS.java +++ b/src/main/java/org/jabref/logic/util/OS.java @@ -8,7 +8,7 @@ public class OS { public static final String NEWLINE = System.lineSeparator(); - public static final String APP_DIR_APP_NAME = "JabRef"; + public static final String APP_DIR_APP_NAME = "jabref"; public static final String APP_DIR_APP_AUTHOR = "org.jabref"; // File separator obtained from system diff --git a/src/main/java/org/jabref/logic/util/io/BackupFileUtil.java b/src/main/java/org/jabref/logic/util/io/BackupFileUtil.java index f669f2ee567..c378386c1ed 100644 --- a/src/main/java/org/jabref/logic/util/io/BackupFileUtil.java +++ b/src/main/java/org/jabref/logic/util/io/BackupFileUtil.java @@ -10,7 +10,6 @@ import java.util.Optional; import org.jabref.logic.util.BackupFileType; -import org.jabref.logic.util.BuildInfo; import org.jabref.logic.util.OS; import net.harawata.appdirs.AppDirsFactory; @@ -25,11 +24,11 @@ private BackupFileUtil() { } public static Path getAppDataBackupDir() { - Path directory = Path.of(AppDirsFactory.getInstance().getUserDataDir( - OS.APP_DIR_APP_NAME, - new BuildInfo().version.toString(), - OS.APP_DIR_APP_AUTHOR)) - .resolve("backups"); + Path directory = Path.of(AppDirsFactory.getInstance() + .getUserDataDir( + OS.APP_DIR_APP_NAME, + "backups", + OS.APP_DIR_APP_AUTHOR)); return directory; } @@ -72,7 +71,7 @@ public static Path getPathForNewBackupFileAndCreateDirectory(Path targetFile, Ba * * @param targetFile the full path of the file to backup */ - public static Optional getPathOfLatestExisingBackupFile(Path targetFile, BackupFileType fileType) { + public static Optional getPathOfLatestExistingBackupFile(Path targetFile, BackupFileType fileType) { // The 
code is similar to "getPathForNewBackupFileAndCreateDirectory" String extension = "." + fileType.getExtensions().get(0); diff --git a/src/main/java/org/jabref/model/database/BibDatabaseContext.java b/src/main/java/org/jabref/model/database/BibDatabaseContext.java index 436f2b070c7..da736d6e974 100644 --- a/src/main/java/org/jabref/model/database/BibDatabaseContext.java +++ b/src/main/java/org/jabref/model/database/BibDatabaseContext.java @@ -248,13 +248,17 @@ public static Path getFulltextIndexBasePath() { public Path getFulltextIndexPath() { Path appData = getFulltextIndexBasePath(); + Path indexPath; if (getDatabasePath().isPresent()) { - LOGGER.info("Index path for {} is {}", getDatabasePath().get(), appData); - return appData.resolve(String.valueOf(this.getDatabasePath().get().hashCode())); + indexPath = appData.resolve(String.valueOf(this.getDatabasePath().get().hashCode())); + LOGGER.debug("Index path for {} is {}", getDatabasePath().get(), indexPath); + return indexPath; } - return appData.resolve("unsaved"); + indexPath = appData.resolve("unsaved"); + LOGGER.debug("Using index for unsaved database: {}", indexPath); + return indexPath; } @Override diff --git a/src/main/java/org/jabref/preferences/JabRefPreferences.java b/src/main/java/org/jabref/preferences/JabRefPreferences.java index f3202faa549..4781e359d7d 100644 --- a/src/main/java/org/jabref/preferences/JabRefPreferences.java +++ b/src/main/java/org/jabref/preferences/JabRefPreferences.java @@ -530,7 +530,10 @@ private JabRefPreferences() { // SSL defaults.put(TRUSTSTORE_PATH, Path.of(AppDirsFactory.getInstance() - .getUserDataDir(OS.APP_DIR_APP_NAME, "ssl", OS.APP_DIR_APP_AUTHOR)) + .getUserDataDir( + OS.APP_DIR_APP_NAME, + "ssl", + OS.APP_DIR_APP_AUTHOR)) .resolve("truststore.jks").toString()); defaults.put(POS_X, 0); diff --git a/src/main/resources/l10n/JabRef_en.properties b/src/main/resources/l10n/JabRef_en.properties index 0b84e032be6..48fa49e57ad 100644 --- 
a/src/main/resources/l10n/JabRef_en.properties +++ b/src/main/resources/l10n/JabRef_en.properties @@ -2533,3 +2533,7 @@ Unable\ to\ open\ linked\ eprint.\ Please\ set\ the\ eprinttype\ field=Unable to Unable\ to\ open\ linked\ eprint.\ Please\ verify\ that\ the\ eprint\ field\ has\ a\ valid\ '%0'\ id=Unable to open linked eprint. Please verify that the eprint field has a valid '%0' id Main\ directory\ not\ found=Main directory not found Please\ select\ a\ valid\ main\ directory\ under=Please select a valid main directory under + +Search\ from\ history...=Search from history... +your\ search\ history\ is\ empty=your search history is empty +Clear\ history =Clear history diff --git a/src/main/resources/tinylog.properties b/src/main/resources/tinylog.properties index 26696d06933..b4340fa32e9 100644 --- a/src/main/resources/tinylog.properties +++ b/src/main/resources/tinylog.properties @@ -5,3 +5,5 @@ writerAzure = application insights # More shrunk exception logs. See https://tinylog.org/v2/configuration/#strip-stack-trace-elements for details exception = strip: jdk.internal + +#level@org.jabref.model.entry.BibEntry = debug diff --git a/src/main/resources/xjc/bibtexml/bibtexml.xsd b/src/main/resources/xjc/bibtexml/bibtexml.xsd deleted file mode 100644 index 519ee240dcf..00000000000 --- a/src/main/resources/xjc/bibtexml/bibtexml.xsd +++ /dev/null @@ -1,1001 +0,0 @@ - - - - - - - - - - BibteXML bibliography schema - Author: Z.W. Hendrikse - Version: Adapted from $Revision: 1.1.1.1 $ - Copyright: GPL - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - An abstract of the work. - - - - - - - The authors affiliation. - - - - - - - A Table of Contents. - - - - - - - Copyright information. - - - - - - - Digital Object Identifier number. - - - - - - - - - - - Key words used for searching or possibly for annotation. - - - - - - - The language the document is in. 
- - - - - - - The Library of Congress Call Number, also seen as lib-congress. - - - - - - - A location associated with the entry, - such as the city in which a conference took place. - - - - - - - The Mathematical Reviews number. - - - - - - - The price of the document. - - - - - - - The physical dimensions of a work. - - - - - - - The WWW Universal Resource Locator that points to the item being - referenced. This often is used for technical reports to point to - the ftp site where the postscript source of the report is located. - - - - - - - Category of this bibitem, added by Zeger W. Hendrikse. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - The bibteXML equivalent of the @article tag. - An article from a journal or magazine. - Required fields: author, title, journal, year. - Optional fields: volume, number, pages, month, note. - - - - - - - - - - - - - - - - - - - - - - The bibteXML equivalent of the @book tag. - A book with an explicit publisher. - Required fields: author or editor, title, publisher, year. - Optional fields: volume or number, series, address, edition, - month, note. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - The bibteXML equivalent of the @booklet tag. - A work that is printed and bound, but without a named - publisher or sponsoring institution. - Required fields: title. - Optional fields: author, howpublished, address, month, year, note. - - - - - - - - - - - - - - - - - - - - The bibteXML equivalent of the @conference tag. - The same as INPROCEEDINGS, included for Scribe compatibility. - - - - - - - - - - - - - The bibteXML equivalent of the @inbook tag. - A part of a book, which may be a chapter (or section or - publisher or sponsoring institution. - Required fields: author or editor, title, chapter and/or pages, - publisher, year. - Optional fields: fields: volume or number, series, type, address, - edition, month, note. 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - The bibteXML equivalent of the @incollection tag. - A part of a book having its own title. - Required fields: author, title, booktitle, publisher, year. - Optional fields: editor, volume or number, series, type, - chapter, pages, address, edition, month, note. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - The bibteXML equivalent of the @inproceedings tag. - An article in a conference proceedings. - Required fields: author, title, booktitle, year. - Optional fields: editor, volume or number, series, pages, - address, month, organization, publisher, note. - - - - - - - - - - - - - The bibteXML equivalent of the @manual tag. - Technical documentation. - Required field: title. - Optional fields: author, organization, address, - edition, month, year, note. - - - - - - - - - - - - - - - - - - - - - The bibteXML equivalent of the @mastersthesis tag. - A Master's thesis. - Required fields: author, title, school, year. - Optional fields: type, address, month, note. - - - - - - - - - - - - - The bibteXML equivalent of the @misc tag. - Use this type when nothing else fits. - Required fields: none. - Optional fields: author, title, howpublished, month, year, note. - - - - - - - - - - - - - - - - - - - The bibteXML equivalent of the @phdthesis tag. - A PhD thesis. - Required fields: author, title, school, year. - Optional fields: type, address, month, note. - - - - - - - - - - - - - The bibteXML equivalent of the @proceedings tag. - The proceedings of a conference. - Required fields: title, year. - Optional fields: editor, volume or number, series, - address, month, organization, publisher, note. - - - - - - - - - - - - - - - - - - - - - - - - - - The bibteXML equivalent of the @techreport tag. - A report published by a school or other institution, - usually numbered within a series. - Required fields: author, title, institution, year. 
- Optional fields: type, number, address, month, note. - - - - - - - - - - - - - - - - - - - - - - The bibteXML equivalent of the @unpublished tag. - A document having an author and title, but not formally published. - Required fields: author, title, note. - Optional fields: month, year. - - - - - - - - - - - - - - - - - - - - - Usually the address of the publisher or other type of - institution. For major publishing houses, van Leunen recommends - omitting the information entirely. For small publishers, on the other - hand, you can help the reader by giving the complete address. - - - - - - - - - An annotation. It is not used by the standard bibliography - styles, but may be used by others that produce an annotated - bibliography. - - - - - - - - - The name(s) of the author(s), in the format described in the - LaTeX book. - - - - - - - - - Title of a book, part of which is being cited. See the - LaTeX book for how to type titles. For book entries, use the title - field instead. - - - - - - - - - A chapter (or section or whatever) number. - - - - - - - - - The database key of the entry being cross referenced. - - - - - - - - - The edition of a book-for example, ``Second''. This - should be an ordinal, and should have the first letter capitalized, as - shown here; the standard styles convert to lower case when necessary. - - - - - - - - - - - Name(s) of editor(s), typed as indicated in the LaTeX book. - If there is also an author field, then the editor field gives the - editor of the book or collection in which the reference appears. - - - - - - - - - How something strange has been published. The first - word should be capitalized. - - - - - - - - - The sponsoring institution of a technical report. - - - - - - - - - A journal name. Abbreviations are provided for many - journals; see the Local Guide. - - - - - - - - - Used for alphabetizing, cross referencing, and creating a label - when the ``author'' information (described in Section [ref: ] is - missing. 
This field should not be confused with the key that appears - in the \cite command and at the beginning of the database entry. - - - - - - - - - The month in which the work was published or, for an - unpublished work, in which it was written You should use the - standard three-letter abbreviation, as described in Appendix B.1.3 of - the LaTeX book. As XML Schema supports a special month format, it is - decided to allow e.g. 05 (for May) too. - - - - - - - - - Any additional information that can help the reader. The first - word should be capitalized. - - - - - - - - - The number of a journal, magazine, technical report, or of a - work in a series. An issue of a journal or magazine is usually - identified by its volume and number; the organization that issues a - technical report usually gives it a number; and sometimes books are - given numbers in a named series. - - - - - - - - - The organization that sponsors a conference or that publishes a manual. - - - - - - - - - One or more page numbers or range of numbers, such as 42-111 - or 7,41,73-97 or 43+ (the `+" in this last example indicates pages - following that don"t form a simple range). To make it easier to - maintain Scribe-compatible databases, the standard styles convert a - single dash (as in 7-33) to the double dash used in TeX to denote - number ranges (as in 7-33). - - - - - - - - - - - The publisher's name. - - - - - - - - - The name of the school where a thesis was written. - - - - - - - - - The name of a series or set of books. When citing an entire - book, the the title field gives its title and an optional series field - gives the name of a series or multi-volume set in which the book is - published. - - - - - - - - - The work's title, typed as explained in the LaTeX book. - - - - - - - - - The work's title, typed as explained in the LaTeX book. - - - - - - - - - The volume of a journal or multivolume book. 
- - - - - - - - - The year of publication or, for an unpublished work, the year - it was written. Generally it should consist of four numerals, such as - 1984. Although the standard styles can handle any year whose last four - nonpunctuation characters are numerals, such as `(about 1984)", it - is decided here to keep the year limited to four numerals, as such a - type is pre-defined in XML Schema. - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/src/main/resources/xjc/medline/medline.xsd b/src/main/resources/xjc/medline/medline.xsd deleted file mode 100644 index 9c2f73a5bdd..00000000000 --- a/src/main/resources/xjc/medline/medline.xsd +++ /dev/null @@ -1,314 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/src/test/java/org/jabref/gui/externalfiles/ImportHandlerTest.java b/src/test/java/org/jabref/gui/externalfiles/ImportHandlerTest.java new file mode 100644 index 00000000000..4a076ac80aa --- /dev/null +++ b/src/test/java/org/jabref/gui/externalfiles/ImportHandlerTest.java @@ -0,0 +1,57 @@ +package org.jabref.gui.externalfiles; + +import java.util.List; + +import javax.swing.undo.UndoManager; + +import org.jabref.gui.DialogService; +import org.jabref.gui.StateManager; +import org.jabref.logic.importer.ImportFormatPreferences; +import org.jabref.logic.importer.ImportFormatReader; +import org.jabref.model.database.BibDatabaseContext; +import org.jabref.model.entry.BibEntry; +import 
org.jabref.model.entry.field.StandardField; +import org.jabref.model.entry.types.StandardEntryType; +import org.jabref.model.util.DummyFileUpdateMonitor; +import org.jabref.preferences.FilePreferences; +import org.jabref.preferences.PreferencesService; + +import org.junit.jupiter.api.Test; +import org.mockito.Answers; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +class ImportHandlerTest { + + @Test + void handleBibTeXData() { + ImportFormatPreferences importFormatPreferences = mock(ImportFormatPreferences.class, Answers.RETURNS_DEEP_STUBS); + + PreferencesService preferencesService = mock(PreferencesService.class); + when(preferencesService.getImportFormatPreferences()).thenReturn(importFormatPreferences); + when(preferencesService.getFilePreferences()).thenReturn(mock(FilePreferences.class)); + + ImportHandler importHandler = new ImportHandler( + mock(BibDatabaseContext.class), + preferencesService, + new DummyFileUpdateMonitor(), + mock(UndoManager.class), + mock(StateManager.class), + mock(DialogService.class), + mock(ImportFormatReader.class)); + + List bibEntries = importHandler.handleBibTeXData(""" + @InProceedings{Wen2013, + library = {Tagungen\\2013\\KWTK45\\}, + } + """); + + BibEntry expected = new BibEntry(StandardEntryType.InProceedings) + .withCitationKey("Wen2013") + .withField(StandardField.LIBRARY, "Tagungen\\2013\\KWTK45\\"); + + assertEquals(List.of(expected), bibEntries.stream().toList()); + } +} diff --git a/src/test/java/org/jabref/gui/search/GetLastSearchHistoryTest.java b/src/test/java/org/jabref/gui/search/GetLastSearchHistoryTest.java new file mode 100644 index 00000000000..390c9c81505 --- /dev/null +++ b/src/test/java/org/jabref/gui/search/GetLastSearchHistoryTest.java @@ -0,0 +1,64 @@ +package org.jabref.gui.search; + +import java.util.List; + +import javafx.stage.Stage; + +import org.jabref.gui.StateManager; +import 
org.jabref.testutils.category.GUITest; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.testfx.framework.junit5.ApplicationExtension; +import org.testfx.framework.junit5.Start; + +@GUITest +@ExtendWith(ApplicationExtension.class) +public class GetLastSearchHistoryTest { + @Start + void onStart(Stage stage) { + // Needed to init JavaFX thread + stage.show(); + } + + @Test + void testGetLastSearchHistory() { + StateManager stateManager = new StateManager(); + stateManager.addSearchHistory("test1"); + stateManager.addSearchHistory("test2"); + stateManager.addSearchHistory("test3"); + List lastSearchHistory = stateManager.getLastSearchHistory(2); + List expected = List.of("test2", "test3"); + + Assertions.assertEquals(expected, lastSearchHistory); + } + + @Test + void testduplicateSearchHistory() { + StateManager stateManager = new StateManager(); + stateManager.addSearchHistory("test1"); + stateManager.addSearchHistory("test2"); + stateManager.addSearchHistory("test3"); + stateManager.addSearchHistory("test1"); + List lastSearchHistory = stateManager.getWholeSearchHistory(); + List expected = List.of("test2", "test3", "test1"); + + Assertions.assertEquals(expected, lastSearchHistory); + } + + @Test + void testclearSearchHistory() { + StateManager stateManager = new StateManager(); + stateManager.addSearchHistory("test1"); + stateManager.addSearchHistory("test2"); + stateManager.addSearchHistory("test3"); + List lastSearchHistory = stateManager.getWholeSearchHistory(); + List expected = List.of("test1", "test2", "test3"); + Assertions.assertEquals(expected, lastSearchHistory); + stateManager.clearSearchHistory(); + lastSearchHistory = stateManager.getWholeSearchHistory(); + expected = List.of(); + Assertions.assertEquals(expected, lastSearchHistory); + } +} diff --git a/src/test/java/org/jabref/logic/bibtex/BibEntryWriterTest.java 
b/src/test/java/org/jabref/logic/bibtex/BibEntryWriterTest.java index 9c1dc3f3aa9..ab3346c87e4 100644 --- a/src/test/java/org/jabref/logic/bibtex/BibEntryWriterTest.java +++ b/src/test/java/org/jabref/logic/bibtex/BibEntryWriterTest.java @@ -184,7 +184,6 @@ void writeReallyUnknownTypeTest() throws Exception { @Test void roundTripTest() throws IOException { - // @formatter:off String bibtexEntry = """ @Article{test, Author = {Foo Bar}, @@ -193,7 +192,63 @@ void roundTripTest() throws IOException { Number = {1} } """.replaceAll("\n", OS.NEWLINE); - // @formatter:on + + // read in bibtex string + ParserResult result = new BibtexParser(importFormatPreferences, fileMonitor).parse(new StringReader(bibtexEntry)); + Collection entries = result.getDatabase().getEntries(); + BibEntry entry = entries.iterator().next(); + + // write out bibtex string + bibEntryWriter.write(entry, bibWriter, BibDatabaseMode.BIBTEX); + + assertEquals(bibtexEntry, stringWriter.toString()); + } + + @Test + void roundTripKeepsFilePathWithBackslashes() throws IOException { + String bibtexEntry = """ + @Article{, + file = {Tagungen\\2013\\KWTK45}, + } + """.replaceAll("\n", OS.NEWLINE); + + // read in bibtex string + ParserResult result = new BibtexParser(importFormatPreferences, fileMonitor).parse(new StringReader(bibtexEntry)); + Collection entries = result.getDatabase().getEntries(); + BibEntry entry = entries.iterator().next(); + + // write out bibtex string + bibEntryWriter.write(entry, bibWriter, BibDatabaseMode.BIBTEX); + + assertEquals(bibtexEntry, stringWriter.toString()); + } + + @Test + void roundTripKeepsEscapedCharacters() throws IOException { + String bibtexEntry = """ + @Article{, + demofield = {Tagungen\\2013\\KWTK45}, + } + """.replaceAll("\n", OS.NEWLINE); + + // read in bibtex string + ParserResult result = new BibtexParser(importFormatPreferences, fileMonitor).parse(new StringReader(bibtexEntry)); + Collection entries = result.getDatabase().getEntries(); + BibEntry entry = 
entries.iterator().next(); + + // write out bibtex string + bibEntryWriter.write(entry, bibWriter, BibDatabaseMode.BIBTEX); + + assertEquals(bibtexEntry, stringWriter.toString()); + } + + @Test + void roundTripKeepsFilePathEndingWithBackslash() throws IOException { + String bibtexEntry = """ + @Article{, + file = {dir\\}, + } + """.replaceAll("\n", OS.NEWLINE); // read in bibtex string ParserResult result = new BibtexParser(importFormatPreferences, fileMonitor).parse(new StringReader(bibtexEntry)); diff --git a/src/test/java/org/jabref/logic/importer/fileformat/BibtexParserTest.java b/src/test/java/org/jabref/logic/importer/fileformat/BibtexParserTest.java index 97bc6f3b8a8..4888a9c6780 100644 --- a/src/test/java/org/jabref/logic/importer/fileformat/BibtexParserTest.java +++ b/src/test/java/org/jabref/logic/importer/fileformat/BibtexParserTest.java @@ -607,6 +607,32 @@ void parseRecognizesAbsoluteFile() throws IOException { assertEquals(Optional.of("D:\\Documents\\literature\\Tansel-PRL2006.pdf"), entry.getField(StandardField.FILE)); } + @Test + void parseRecognizesFinalSlashAsSlash() throws Exception { + ParserResult result = parser + .parse(new StringReader(""" + @misc{, + test = {wired\\}, + } + """)); + + assertEquals( + List.of(new BibEntry() + .withField(new UnknownField("test"), "wired\\")), + result.getDatabase().getEntries() + ); + } + + /** + * JabRef's heuristics is not able to parse this special case. + */ + @Test + void parseFailsWithFinalSlashAsSlashWhenSingleLine() throws Exception { + ParserResult parserResult = parser.parse(new StringReader("@misc{, test = {wired\\}}")); + // In case JabRef was more relaxed, `assertFalse` would be provided here. 
+ assertTrue(parserResult.hasWarnings()); + } + @Test void parseRecognizesDateFieldWithConcatenation() throws IOException { ParserResult result = parser diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/MedlineImporterTestArticleItalics.bib b/src/test/resources/org/jabref/logic/importer/fileformat/MedlineImporterTestArticleItalics.bib new file mode 100644 index 00000000000..f64ae9d0921 --- /dev/null +++ b/src/test/resources/org/jabref/logic/importer/fileformat/MedlineImporterTestArticleItalics.bib @@ -0,0 +1,26 @@ +@Article{, + author = {Moreno-Grau, Sonia and Hernández, Isabel and Heilmann-Heimbach, Stefanie and Ruiz, Susana and Rosende-Roca, Maitée and Mauleón, Ana and Vargas, Liliana and Rodríguez-Gómez, Octavio and Alegret, Montserrat and Espinosa, Ana and Ortega, Gemma and Aguilera, Nuria and Abdelnour, Carla and Neuroimaging Initiative, Alzheimer's Disease and Gil, Silvia and Maier, Wolfgang and Sotolongo-Grau, Oscar and Tárraga, Lluís and Ramirez, Alfredo and López-Arrrieta, Jesús and Antúnez, Carmen and Serrano-Ríos, Manuel and Boada, Mercè and Ruiz, Agustín}, + journal = {Oncotarget}, + title = {Genome-wide significant risk factors on chromosome 19 and the APOE locus.}, + year = {2018}, + issn = {1949-2553}, + month = may, + pages = {24590--24600}, + volume = {9}, + abstract = {The apolipoprotein E ( APOE ) gene on chromosome 19q13.32, was the first, and remains the strongest, genetic risk factor for Alzheimer's disease (AD). Additional signals associated with AD have been located in chromosome 19, including ABCA7 (19p13.3) and CD33 ( 19q13.41). The ABCA7 gene has been replicated in most populations. However, the contribution to AD of other signals close to APOE gene remains controversial. Possible explanations for inconsistency between reports include long range linkage disequilibrium (LRLD). We analysed the contribution of ABCA7 and CD33 loci to AD risk and explore LRLD patterns across APOE region. 
To evaluate AD risk conferred by ABCA7 rs4147929:G>A and CD33 rs3865444:C>A, we used a large Spanish population (1796 AD cases, 2642 controls). The ABCA7 rs4147929:G>A SNP effect was nominally replicated in the Spanish cohort and reached genome-wide significance after meta-analysis (odds ratio (OR)=1.15, 95% confidence interval (95% CI)=1.12-1.19; P = 1.60 x 10 (-19)). CD33 rs3865444:C>A was not associated with AD in the dataset. The meta-analysis was also negative (OR=0.98, 95% CI=0.93-1.04; P =0.48). After exploring LRLD patterns between APOE and CD33 in several datasets, we found significant LD (D' >0.20; P <0.030) between APOE -Ɛ2 and CD33 rs3865444C>A in two of five datasets, suggesting the presence of a non-universal long range interaction between these loci affecting to some populations. In conclusion, we provide here evidence of genetic association of the ABCA7 locus in the Spanish population and also propose a plausible explanation for the controversy on the contribution of CD33 to AD susceptibility.}, + country = {United States}, + doi = {10.18632/oncotarget.25083}, + issn-linking = {1949-2553}, + issue = {37}, + journal-abbreviation = {Oncotarget}, + keywords = {ABCA7; APOE; CD33; Gerotarget; late onset Alzheimer’s disease; linkage disequilibrium}, + nlm-id = {101532965}, + owner = {NLM}, + pii = {25083}, + pmc = {PMC5973862}, + pmid = {29872490}, + pubmodel = {Electronic-eCollection}, + pubstate = {epublish}, + revised = {2019-11-20}, + status = {PubMed-not-MEDLINE}, +} diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/MedlineImporterTestArticleItalics.xml b/src/test/resources/org/jabref/logic/importer/fileformat/MedlineImporterTestArticleItalics.xml new file mode 100644 index 00000000000..fa4c03e3f4d --- /dev/null +++ b/src/test/resources/org/jabref/logic/importer/fileformat/MedlineImporterTestArticleItalics.xml @@ -0,0 +1,652 @@ + + + + 29872490 + + 2019 + 11 + 20 + +
+ + 1949-2553 + + 9 + 37 + + 2018 + May + 15 + + + Oncotarget + Oncotarget + + Genome-wide significant risk factors on chromosome 19 and the + APOE locus. + + + 24590 + 24600 + 24590-24600 + + 10.18632/oncotarget.25083 + + The apolipoprotein E ( + APOE) gene on chromosome 19q13.32, was the first, and remains the strongest, genetic risk factor for Alzheimer's disease (AD). Additional signals associated with AD have been located in chromosome 19, including + ABCA7 (19p13.3) and + CD33 (19q13.41). The + ABCA7 gene has been replicated in most populations. However, the contribution to AD of other signals close to + APOE gene remains controversial. Possible explanations for inconsistency between reports include long range linkage disequilibrium (LRLD). We analysed the contribution of + ABCA7 and + CD33 loci to AD risk and explore LRLD patterns across + APOE region. To evaluate AD risk conferred by + ABCA7 rs4147929:G>A and + CD33 rs3865444:C>A, we used a large Spanish population (1796 AD cases, 2642 controls). The + ABCA7 rs4147929:G>A SNP effect was nominally replicated in the Spanish cohort and reached genome-wide significance after meta-analysis (odds ratio (OR)=1.15, 95% confidence interval (95% CI)=1.12-1.19; + P = 1.60 x 10 + -19). + CD33 rs3865444:C>A was not associated with AD in the dataset. The meta-analysis was also negative (OR=0.98, 95% CI=0.93-1.04; + P=0.48). After exploring LRLD patterns between + APOE and + CD33 in several datasets, we found significant LD (D' >0.20; + P <0.030) between + APOE-Ɛ2 and + CD33 rs3865444C>A in two of five datasets, suggesting the presence of a non-universal long range interaction between these loci affecting to some populations. In conclusion, we provide here evidence of genetic association of the + ABCA7 locus in the Spanish population and also propose a plausible explanation for the controversy on the contribution of + CD33 to AD susceptibility. 
+ + + + + Moreno-Grau + Sonia + S + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Hernández + Isabel + I + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Heilmann-Heimbach + Stefanie + S + + Institute of Human Genetics, University of Bonn, Bonn, Germany. + + + Department of Genomics, Life & Brain Center, University of Bonn, Bonn, Germany. + + + + Ruiz + Susana + S + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Rosende-Roca + Maitée + M + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Mauleón + Ana + A + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Vargas + Liliana + L + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Rodríguez-Gómez + Octavio + O + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Alegret + Montserrat + M + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Espinosa + Ana + A + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. 
+ + + + Ortega + Gemma + G + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Aguilera + Nuria + N + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Abdelnour + Carla + C + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Neuroimaging Initiative + Alzheimer's Disease + AD + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + Institute of Human Genetics, University of Bonn, Bonn, Germany. + + + Department of Genomics, Life & Brain Center, University of Bonn, Bonn, Germany. + + + Department of Psychiatry and Psychotherapy, University of Bonn, Bonn, Germany. + + + German Center for Neurodegenerative Diseases, DZNE, Bonn, Germany. + + + Department of Psychiatry and Psychotherapy, University of Cologne, Cologne, Germany. + + + Memory Unit, University Hospital La Paz-Cantoblanco, Madrid, Spain. + + + Dementia Unit, University Hospital Virgen de la Arrixaca, Murcia, Spain. + + + Centro de Investigación Biomédica en Red de Diabetes y Enfermedades Metabólicas Asociadas, CIBERDEM, Spain, Hospital Clínico San Carlos, Madrid, Spain. + + + + Gil + Silvia + S + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Maier + Wolfgang + W + + Department of Psychiatry and Psychotherapy, University of Bonn, Bonn, Germany. + + + German Center for Neurodegenerative Diseases, DZNE, Bonn, Germany. 
+ + + + Sotolongo-Grau + Oscar + O + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Tárraga + Lluís + L + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Ramirez + Alfredo + A + + Institute of Human Genetics, University of Bonn, Bonn, Germany. + + + Department of Psychiatry and Psychotherapy, University of Bonn, Bonn, Germany. + + + Department of Psychiatry and Psychotherapy, University of Cologne, Cologne, Germany. + + + + López-Arrrieta + Jesús + J + + Memory Unit, University Hospital La Paz-Cantoblanco, Madrid, Spain. + + + + Antúnez + Carmen + C + + Dementia Unit, University Hospital Virgen de la Arrixaca, Murcia, Spain. + + + + Serrano-Ríos + Manuel + M + + Centro de Investigación Biomédica en Red de Diabetes y Enfermedades Metabólicas Asociadas, CIBERDEM, Spain, Hospital Clínico San Carlos, Madrid, Spain. + + + + Boada + Mercè + M + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + Ruiz + Agustín + A + + Research Center and Memory Clinic of Fundació ACE, Institut Català de Neurociències Aplicades, Univesitat Internacional de Catalunya, Barcelona, Spain. + + + + eng + + + U01 AG024904 + AG + NIA NIH HHS + United States + + + + Journal Article + + + 2018 + 05 + 15 + +
+ + United States + Oncotarget + 101532965 + 1949-2553 + + + ABCA7 + APOE + CD33 + Gerotarget + late onset Alzheimer’s disease + linkage disequilibrium + + CONFLICTS OF INTEREST None. The authors declare that they have no competing interest. +
+ + + + 2017 + 12 + 13 + + + 2018 + 3 + 22 + + + 2018 + 6 + 7 + 6 + 0 + + + 2018 + 6 + 7 + 6 + 0 + + + 2018 + 6 + 7 + 6 + 1 + + + epublish + + 29872490 + PMC5973862 + 10.18632/oncotarget.25083 + 25083 + + + + Moreno-Grau S, Ruiz A. Genome research in pre-dementia stages of Alzheimer’s disease. Expert Rev Mol Med. 2016;18:e11. + + 27237222 + + + + Corder E, Saunders A. Gene dose of apolipoprotein E type 4 allele and the risk of Alzheimer’s disease in late onset families. Science. 1993;8:41–3. + + 8346443 + + + + Corder EH, Saunders AM, Risch NJ, Strittmatter WJ, Schmechel DE, Gaskell PC, Rimmler JB, Locke PA, Conneally PM, Schmader KE. Protective effect of apolipoprotein E type 2 allele for late onset Alzheimer disease. Nat Genet. 1994;7:180–4. + + 7920638 + + + + Roses AD, Lutz MW, Amrine-Madsen H, Saunders AM, Crenshaw DG, Sundseth SS, Huentelman MJ, Welsh-Bohmer KA, Reiman EM. A TOMM40 variable-length polymorphism predicts the age of late-onset Alzheimer’s disease. Pharmacogenomics J. 2010;10:375–84. + + PMC2946560 + 20029386 + + + + Seshadri S, Fitzpatrick AL, Ikram MA, DeStefano AL, Gudnason V, Boada M, Bis JC, Smith AV, Carassquillo MM, Lambert JC, Harold D, Schrijvers EM, Ramirez-Lorca R, et al. Genome-wide analysis of genetic loci associated with Alzheimer disease. JAMA. 2010;303:1832–40. + + PMC2989531 + 20460622 + + + + Hollingworth P, Harold D, Sims R, Gerrish A, Lambert JC, Carrasquillo MM, Abraham R, Hamshere ML, Pahwa JS, Moskvina V, Dowzell K, Jones N, Stretton A, et al. Common variants at ABCA7, MS4A6A/MS4A4E, EPHA1, CD33 and CD2AP are associated with Alzheimer’s disease. Nat Genet. 2011;43:429–35. + + PMC3084173 + 21460840 + + + + Cruchaga C, Karch CM, Jin SC, Benitez BA, Cai Y, Guerreiro R, Harari O, Norton J, Budde J, Bertelsen S, Jeng AT, Cooper B, Skorupa T, et al. Rare coding variants in the phospholipase D3 gene confer risk for Alzheimer ’ s disease. Nature. 2014;505:550–4. 
+ + PMC4050701 + 24336208 + + + + Naj AC, Jun G, Beecham GW, Wang LS, Vardarajan BN, Buros J, Gallins PJ, Buxbaum JD, Jarvik GP, Crane PK, Larson EB, Bird TD, Boeve BF, et al. Common variants at MS4A4/MS4A6E, CD2AP, CD33 and EPHA1 are associated with late-onset Alzheimer’s disease. Nat Genet. 2011;43:436–41. + + PMC3090745 + 21460841 + + + + Lambert JC, Ibrahim-Verbaas CA, Harold D, Naj AC, Sims R, Bellenguez C, DeStafano AL, Bis JC, Beecham GW, Grenier-Boley B, Russo G, Thorton-Wells TA, Jones N, et al. Meta-analysis of 74,046 individuals identifies 11 new susceptibility loci for Alzheimer’s disease. Nat Genet. 2013;45:1452–8. + + PMC3896259 + 24162737 + + + + Reitz C, Jun G, Naj A, Rajbhandary R, Vardarajan BN, Wang LS, Valladares O, Lin CF, Larson EB, Graff-Radford NR, Evans D, De Jager PL, Crane PK, et al. Variants in the ATP-binding cassette transporter (ABCA7), apolipoprotein E ε4,and the risk of late-onset Alzheimer disease in African Americans. JAMA. 2013;309:1483–92. + + PMC3667653 + 23571587 + + + + Chouraki V, Seshadri S. Genetics of Alzheimer’s disease. Adv Genet. 2014;87:245–94. + + 25311924 + + + + Heilmann S, Drichel D, Clarimon J, Fernández V, Lacour A, Wagner H, Thelen M, Hernández I, Fortea J, Alegret M, Blesa R, Mauleón A, Roca MR, et al. PLD3 in non-familial Alzheimer’s disease. Nature. 2015;520:E3–5. + + 25832411 + + + + Carrasquillo MM, Belbin O, Hunter TA, Ma L, Bisceglio GD, Zou F, Crook JE, Pankratz VS, Sando SB, Aasly JO, Barcikowska M, Wszolek ZK, Dickson DW, et al. Replication of EPHA1 and CD33 associations with late-onset Alzheimer’s disease: a multi-centre case-control study. Mol Neurodegener. 2011;6:54. + + PMC3157442 + 21798052 + + + + Sakae N, Liu CC, Shinohara M, Frisch-Daiello J, Ma L, Yamazaki Y, Tachibana M, Younkin L, Kurti A, Carrasquillo MM, Zou F, Sevlever D, Bisceglio G, et al. ABCA7 Deficiency Accelerates Amyloid-β Generation and Alzheimer’s Neuronal Pathology. J Neurosci. 2016;36:3848–59. 
+ + PMC4812140 + 27030769 + + + + Jehle AW, Gardai SJ, Li S, Linsel-Nitschke P, Morimoto K, Janssen WJ, Vandivier RW, Wang N, Greenberg S, Dale BM, Qin C, Henson PM, Tall AR. ATP-binding cassette transporter A7 enhances phagocytosis of apoptotic cells and associated ERK signaling in macrophages. J Cell Biol. 2006;174:547–56. + + PMC2064260 + 16908670 + + + + Kim WS, Li H, Ruberu K, Chan S, Elliott DA, Low JK, Cheng D, Karl T, Garner B. Deletion of Abca7 increases cerebral amyloid-β accumulation in the J20 mouse model of Alzheimer’s disease. J Neurosci. 2013;33:4387–94. + + PMC6704948 + 23467355 + + + + Bradshaw EM, Chibnik LB, Keenan BT, Ottoboni L, Raj T, Tang A, Rosenkrantz LL, Imboywa S, Lee M, Von Korff A, Morris MC, Evans DA, Johnson K, et al. CD33 Alzheimer’s disease locus: altered monocyte function and amyloid biology. Nat Neurosci. 2013;16:848–50. + + PMC3703870 + 23708142 + + + + Cruchaga C, Nowotny P, Kauwe JSK, Ridge PG, Mayo K, Bertelsen S, Hinrichs A, Fagan AM, Holtzman DM, Morris JC, Goate AM. Association and expression analyses with single-nucleotide polymorphisms in TOMM40 in Alzheimer disease. Arch Neurol. 2011;68:1013–9. + + PMC3204798 + 21825236 + + + + Mueller JC. Linkage disequilibrium for different scales and applications. Brief Bioinform. 2004;5:355–64. + + 15606972 + + + + Ardlie KG, Kruglyak L, Seielstad M. Patterns of Linkage Disequilibrium in the Human Genome. Nat Rev Genet. 2002;3:299–309. + + 11967554 + + + + Campbell CD, Ogburn EL, Lunetta KL, Lyon HN, Freedman ML, Groop LC, Altshuler D, Ardlie KG, Hirschhorn JN. Demonstrating stratification in a European American population. Nat Genet. 2005;37:868–72. + + 16041375 + + + + Dawson E, Abecasis GR, Bumpstead S, Chen Y, Hunt S, Beare DM, Pabial J, Dibling T, Tinsley E, Kirby S, Carter D, Papaspyridonos M, Livingstone S, et al. A first-generation linkage disequilibrium map of human chromosome 22. Nature. 2002;418:544–8. + + 12110843 + + + + Beckmann JS, Estivill X, Antonarakis SE. 
Copy number variants and genetic traits: closer to the resolution of phenotypic to genotypic variability. Nat Rev Genet. 2007;8:639–46. + + 17637735 + + + + Antúnez C, Boada M, González-Pérez A, Gayán J, Ramírez-Lorca R, Marín J, Hernández I, Moreno-Rey C, Morón FJ, López-Arrieta J, Mauleón A, Rosende-Roca M, Noguera-Perea F, et al. The membrane-spanning 4-domains, subfamily A (MS4A) gene cluster contains a common variant associated with Alzheimer’s disease. Genome Med. 2011;3:33. + + PMC3219074 + 21627779 + + + + Ruiz A, Heilmann S, Becker T, Hernández I, Wagner H, Thelen M, Mauleón A, Rosende-Roca M, Bellenguez C, Bis JC, Harold D, Gerrish A, Sims R, et al. Follow-up of loci from the International Genomics of Alzheimer’s Disease Project identifies TRIP4 as a novel susceptibility gene. Transl Psychiatry. 2014;4:e358. + + PMC3944635 + 24495969 + + + + Calero O, Hortigüela R, Bullido M, Calero M. Apolipoprotein E genotyping method by Real Time PCR, a fast and cost-effective alternative to the TaqMan® and FRET assays. J Neurosci Methods. 2009;183:238–40. + + 19583979 + + + + Purcell S, Neale B, Todd-Brown K, Thomas L, Ferreira MA, Bender D, Maller J, Sklar P, de Bakker PI, Daly MJ, Sham PC. PLINK: a tool set for whole-genome association and population-based linkage analyses. Am J Hum Genet. 2007;81:559–75. + + PMC1950838 + 17701901 + + + + Kjeldsen EW, Tybjærg-Hansen A, Nordestgaard BG, Frikke-Schmidt R. ABCA7and risk of dementia and vascular disease in the Danish population. Ann Clin Transl Neurol. 2018;5:41–51. + + PMC5771325 + 29376091 + + + + Moreno DJ, Ruiz S, Ríos Á, Lopera F, Ostos H, Via M, Bedoya G. Association of GWAS Top Genes With Late-Onset Alzheimer’s Disease in Colombian Population. Am J Alzheimers Dis Other Demen. 2017;32:27–35. + + 28084078 + + + + Zhang DF, Li J, Wu H, Cui Y, Bi R, Zhou HJ, Wang HZ, Zhang C, Wang D, Kong QP, Li T, Fang Y, Jiang T, et al. 
CFH Variants Affect Structural and Functional Brain Changes and Genetic Risk of Alzheimer’s Disease. Neuropsychopharmacology. 2015;41:1034–1035. + + PMC4748428 + 26243271 + + + + Jiao B, Liu X, Zhou L, Wang MH, Zhou Y, Xiao T, Zhang W, Sun R, Waye MM, Tang B, Shen L. Polygenic Analysis of Late-Onset Alzheimer’s Disease from Mainland China. PLoS One. 2015;10:e0144898. + + PMC4683047 + 26680604 + + + + Mao YF, Guo ZY, Pu JL, Chen YX, Zhang BR. Association of CD33 and MS4A cluster variants with Alzheimer’s disease in East Asian Populations. Neurosci Lett. 2015;609:235–239. + + 26455864 + + + + Ebbert MT, Ridge PG, Wilson AR, Sharp AR, Bailey M, Norton MC, Tschanz JT, Munger RG, Corcoran CD, Kauwe JSK. Population-based Analysis of Alzheimer’s Disease Risk Alleles Implicates Genetic Interactions. Biol Psychiatry. 2014;75:732–7. + + PMC3867586 + 23954108 + + + + Omoumi A, Fok A, Greenwood T, Sadovnick AD, Feldman HH, Hsiung GY. Evaluation of late-onset Alzheimer disease genetic susceptibility risks in a Canadian population. Neurobiol Aging. 2014;35:936.e5-12. + + 24176626 + + + + Tan L, Yu JT, Zhang W, Wu ZC, Zhang Q, Liu QY, Wang W, Wang HF, Ma XY, Cui WZ. Association of GWAS-linked loci with late-onset Alzheimer’s disease in a northern Han Chinese population. Alzheimers Dement. 2013;9:546–53. + + 23232270 + + + + Chung SJ, Lee JH, Kim SY, You S, Kim MJ, Lee JY, Koh J. Association of GWAS top hits with late-onset Alzheimer disease in Korean population. Alzheimer Dis Assoc Disord. 2013;27:250–7. + + 22975751 + + + + Deng YL, Liu LH, Wang Y, Tang HD, Ren RJ, Xu W, Ma JF, Wang LL, Zhuang JP, Wang G, Chen SD. The prevalence of CD33 and MS4A6A variant in Chinese Han population with Alzheimer’s disease. Hum Genet. 2012;131:1245–9. + + 22382309 + + + + Logue MW. A Comprehensive Genetic Association Study of Alzheimer Disease in African Americans. Arch Neurol. 2011;68:1569. 
+ + PMC3356921 + 22159054 + + + + Miyashita A, Koike A, Jun G, Wang LS, Takahashi S, Matsubara E, Kawarabayashi T, Shoji M, Tomita N, Arai H, Asada T, Harigaya Y, Ikeda M, et al. SORL1 is genetically associated with late-onset Alzheimer’s disease in Japanese, Koreans and Caucasians. PLoS One. 2013;8:e58618. + + PMC3614978 + 23565137 + + + + Weiner M, Aisen P, Jack C, Jr, Jaugust W, Trojanowski J, Shaw L, Saykin AJ, Morris JC, Cairns N, Laurel A, Toga A, Green R, Walter S, et al. The Alzheimer’s disease neuroinmaging iniciative: Progress report and future plans. Alzheimers Dement. 2010;6:202–11. + + PMC2927112 + 20451868 + + + + Li H, Wetten S, Li L, St Jean PL, Upmanyu R, Surh L, Hosford D, Barnes MR, Briley JD, Borrie M, Coletta N, Delisle R, Dhalla D, et al. Candidate single-nucleotide polymorphisms from a genomewide association study of Alzheimer disease. Arch Neurol. 2008;65:45–53. + + 17998437 + + + + Wijsman EM, Pankratz ND, Choi Y, Rothstein JH, Faber KM, Cheng R, Lee JH, Bird TD, Bennett DA, Diaz-Arrastia R, Goate AM, Farlow M, Ghetti B, et al. Genome-wide association of familial late-onset Alzheimer’s disease replicates BIN1 and CLU and nominates CUGBP2 in interaction with APOE. PLoS Genet. 2011;7:e1001308. + + PMC3040659 + 21379329 + + + + Zhang Q, Calus MP, Guldbrandtsen B, Lund MS, Sahana G. Estimation of inbreeding using pedigree, 50k SNP chip genotypes and full sequence data in three cattle breeds. BMC Genet. 2015;16:88. + + PMC4509611 + 26195126 + + + + +
+
diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/MedlineImporterTestNbib.bib b/src/test/resources/org/jabref/logic/importer/fileformat/MedlineImporterTestNbib.bib index 6d897517d74..29dddebede9 100644 --- a/src/test/resources/org/jabref/logic/importer/fileformat/MedlineImporterTestNbib.bib +++ b/src/test/resources/org/jabref/logic/importer/fileformat/MedlineImporterTestNbib.bib @@ -27,7 +27,7 @@ @article{ pubmodel = {Print-Electronic}, pubstate = {ppublish}, references = {23}, - revised = {2015-9-15}, + revised = {2015-09-15}, season = {Spring}, space-flight-mission = {fly}, status = {MEDLINE},