From c1f16de433c76be6ffb3c52e90198e77959e250f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Tue, 20 Jan 2026 10:01:13 +0100 Subject: [PATCH 01/13] [maven-release-plugin] prepare for next development iteration --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index 32e9246e3..6f440bfff 100644 --- a/pom.xml +++ b/pom.xml @@ -3,7 +3,7 @@ com.atomgraph linkeddatahub - 5.2.1 + 5.2.2-SNAPSHOT ${packaging.type} AtomGraph LinkedDataHub @@ -46,7 +46,7 @@ https://github.com/AtomGraph/LinkedDataHub scm:git:git://github.com/AtomGraph/LinkedDataHub.git scm:git:git@github.com:AtomGraph/LinkedDataHub.git - linkeddatahub-5.2.1 + linkeddatahub-2.1.1 From b2d3bce13403b345091753b6e4d3418efd85eee5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Thu, 22 Jan 2026 12:55:59 +0100 Subject: [PATCH 02/13] Additional HTTP tests for file uploads --- http-tests/imports/PUT-file-format-browser.sh | 107 +++++++++++++++++ .../imports/PUT-file-format-explicit.sh | 113 ++++++++++++++++++ 2 files changed, 220 insertions(+) create mode 100755 http-tests/imports/PUT-file-format-browser.sh create mode 100755 http-tests/imports/PUT-file-format-explicit.sh diff --git a/http-tests/imports/PUT-file-format-browser.sh b/http-tests/imports/PUT-file-format-browser.sh new file mode 100755 index 000000000..cf5a20616 --- /dev/null +++ b/http-tests/imports/PUT-file-format-browser.sh @@ -0,0 +1,107 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +pwd=$(realpath "$PWD") + +# add agent to the writers group + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + +# create test file with sample content + +test_file=$(mktemp) +echo "test,data,sample" > "$test_file" +echo "1,2,3" >> "$test_file" +echo "4,5,6" >> "$test_file" + +# upload file WITHOUT explicit media type (rely on browser detection via `file -b --mime-type`) + +file_doc=$(create-file.sh \ +-f "$AGENT_CERT_FILE" \ +-p "$AGENT_CERT_PWD" \ +-b "$END_USER_BASE_URL" \ +--title "Test File for Browser Media Type" \ +--file "$test_file") + +# get the file resource URI and initial dct:format + +file_doc_ntriples=$(get.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + --accept 'application/n-triples' \ + "$file_doc") + +file_uri=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p") + +# get initial SHA1 hash +initial_sha1=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_uri//\//\\/}> \"(.*)\" \./\1/p") + +# get initial dct:format (should be browser-detected) +initial_format=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_uri//\//\\/}> <(.*)> \./\1/p") + +# re-upload the same file to the same document with explicit media type: text/csv +# using PUT with RDF/POST multipart format +# IMPORTANT: Include explicit dct:format in RDF to simulate user editing the format field in the form + +rdf_post="" +rdf_post+="-F \"rdf=\"\n" +rdf_post+="-F \"sb=file\"\n" +rdf_post+="-F \"pu=http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#fileName\"\n" +rdf_post+="-F \"ol=@${test_file};type=text/csv\"\n" +rdf_post+="-F \"pu=http://purl.org/dc/terms/title\"\n" +rdf_post+="-F \"ol=Test File for 
Browser Media Type\"\n" +rdf_post+="-F \"pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n" +rdf_post+="-F \"ou=http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#FileDataObject\"\n" +rdf_post+="-F \"pu=http://purl.org/dc/terms/format\"\n" +rdf_post+="-F \"ou=http://www.sparontologies.net/mediatype/text/csv\"\n" +rdf_post+="-F \"su=${file_doc}\"\n" +rdf_post+="-F \"pu=http://purl.org/dc/terms/title\"\n" +rdf_post+="-F \"ol=Test File for Browser Media Type\"\n" +rdf_post+="-F \"pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n" +rdf_post+="-F \"ou=https://www.w3.org/ns/ldt/document-hierarchy#Item\"\n" +rdf_post+="-F \"pu=http://xmlns.com/foaf/0.1/primaryTopic\"\n" +rdf_post+="-F \"ob=file\"\n" +rdf_post+="-F \"pu=http://rdfs.org/sioc/ns#has_container\"\n" +rdf_post+="-F \"ou=${END_USER_BASE_URL}files/\"\n" + +echo -e "$rdf_post" | curl -f -v -s -k -X PUT -H "Accept: text/turtle" -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" -o /dev/null --config - "$file_doc" + +# get updated document + +updated_ntriples=$(get.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + --accept 'application/n-triples' \ + "$file_doc") + +# get updated SHA1 hash (should be same as initial) +updated_sha1=$(echo "$updated_ntriples" | sed -rn "s/<${file_uri//\//\\/}> \"(.*)\" \./\1/p") + +# get updated dct:format (should be text/csv) +updated_format=$(echo "$updated_ntriples" | sed -rn "s/<${file_uri//\//\\/}> <(.*)> \./\1/p") + +# verify SHA1 is unchanged (same file content) +if [ "$initial_sha1" != "$updated_sha1" ]; then + echo "ERROR: SHA1 hash changed! Initial: $initial_sha1, Updated: $updated_sha1" + exit 1 +fi + +# verify dct:format was updated to text/csv +if [[ ! "$updated_format" =~ text/csv ]]; then + echo "ERROR: Format should have been updated to text/csv but got: $updated_format" + echo "Initial format was: $initial_format" + exit 1 +fi + +# cleanup +rm -f "$test_file" diff --git a/http-tests/imports/PUT-file-format-explicit.sh b/http-tests/imports/PUT-file-format-explicit.sh new file mode 100755 index 000000000..eaa10401e --- /dev/null +++ b/http-tests/imports/PUT-file-format-explicit.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +pwd=$(realpath "$PWD") + +# add agent to the writers group + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + +# create test file with sample content + +test_file=$(mktemp) +echo "test,data,sample" > "$test_file" +echo "1,2,3" >> "$test_file" +echo "4,5,6" >> "$test_file" + +# upload file with explicit media type: text/plain + +file_doc=$(create-file.sh \ +-f "$AGENT_CERT_FILE" \ +-p "$AGENT_CERT_PWD" \ +-b "$END_USER_BASE_URL" \ +--title "Test File for Media Type Update" \ +--file "$test_file" \ +--file-content-type "text/plain") + +# get the file resource URI and initial dct:format + +file_doc_ntriples=$(get.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + --accept 'application/n-triples' \ + "$file_doc") + +file_uri=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p") + +# get initial SHA1 hash +initial_sha1=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_uri//\//\\/}> \"(.*)\" \./\1/p") + +# get initial dct:format 
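+# (the file was uploaded above with --file-content-type text/plain, so this should resolve to a text/plain media type resource)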
+initial_format=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_uri//\//\\/}> <(.*)> \./\1/p") + +# verify initial format is text/plain +if [[ ! "$initial_format" =~ text/plain ]]; then + echo "ERROR: Initial format should contain text/plain but got: $initial_format" + exit 1 +fi + +# re-upload the same file to the same document with different explicit media type: text/csv +# using PUT with RDF/POST multipart format +# IMPORTANT: Include explicit dct:format in RDF to simulate user editing the format field in the form + +rdf_post="" +rdf_post+="-F \"rdf=\"\n" +rdf_post+="-F \"sb=file\"\n" +rdf_post+="-F \"pu=http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#fileName\"\n" +rdf_post+="-F \"ol=@${test_file};type=text/csv\"\n" +rdf_post+="-F \"pu=http://purl.org/dc/terms/title\"\n" +rdf_post+="-F \"ol=Test File for Media Type Update\"\n" +rdf_post+="-F \"pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n" +rdf_post+="-F \"ou=http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#FileDataObject\"\n" +rdf_post+="-F \"pu=http://purl.org/dc/terms/format\"\n" +rdf_post+="-F \"ou=http://www.sparontologies.net/mediatype/text/csv\"\n" +rdf_post+="-F \"su=${file_doc}\"\n" +rdf_post+="-F \"pu=http://purl.org/dc/terms/title\"\n" +rdf_post+="-F \"ol=Test File for Media Type Update\"\n" +rdf_post+="-F \"pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n" +rdf_post+="-F \"ou=https://www.w3.org/ns/ldt/document-hierarchy#Item\"\n" +rdf_post+="-F \"pu=http://xmlns.com/foaf/0.1/primaryTopic\"\n" +rdf_post+="-F \"ob=file\"\n" +rdf_post+="-F \"pu=http://rdfs.org/sioc/ns#has_container\"\n" +rdf_post+="-F \"ou=${END_USER_BASE_URL}files/\"\n" + +echo -e "$rdf_post" | curl -f -v -s -k -X PUT -H "Accept: text/turtle" -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" -o /dev/null --config - "$file_doc" + +# get updated document + +updated_ntriples=$(get.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + --accept 'application/n-triples' \ + "$file_doc") + +# get updated SHA1 hash (should be same as initial) +updated_sha1=$(echo "$updated_ntriples" | sed -rn "s/<${file_uri//\//\\/}> \"(.*)\" \./\1/p") + +# get updated dct:format (should be text/csv) +updated_format=$(echo "$updated_ntriples" | sed -rn "s/<${file_uri//\//\\/}> <(.*)> \./\1/p") + +# verify SHA1 is unchanged (same file content) +if [ "$initial_sha1" != "$updated_sha1" ]; then + echo "ERROR: SHA1 hash changed! Initial: $initial_sha1, Updated: $updated_sha1" + exit 1 +fi + +# verify dct:format was updated to text/csv +if [[ ! 
"$updated_format" =~ text/csv ]]; then + echo "ERROR: Format should have been updated to text/csv but got: $updated_format" + exit 1 +fi + +# cleanup +rm -f "$test_file" From 85262f0d3305a2ff8fd8ede2bbaf95c7cec97733 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Thu, 22 Jan 2026 14:29:06 +0100 Subject: [PATCH 03/13] Test fix --- http-tests/imports/PUT-file-format-browser.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/http-tests/imports/PUT-file-format-browser.sh b/http-tests/imports/PUT-file-format-browser.sh index cf5a20616..dbc20e42c 100755 --- a/http-tests/imports/PUT-file-format-browser.sh +++ b/http-tests/imports/PUT-file-format-browser.sh @@ -51,7 +51,7 @@ initial_format=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_uri//\//\\/}> Date: Thu, 22 Jan 2026 15:23:08 +0100 Subject: [PATCH 04/13] HTTP test updates --- .../imports/PUT-file-format-explicit.sh | 55 ++++++++----------- ...e-format-browser.sh => PUT-file-format.sh} | 51 ++++++++--------- 2 files changed, 45 insertions(+), 61 deletions(-) rename http-tests/imports/{PUT-file-format-browser.sh => PUT-file-format.sh} (62%) diff --git a/http-tests/imports/PUT-file-format-explicit.sh b/http-tests/imports/PUT-file-format-explicit.sh index eaa10401e..3c9dffd8b 100755 --- a/http-tests/imports/PUT-file-format-explicit.sh +++ b/http-tests/imports/PUT-file-format-explicit.sh @@ -24,15 +24,20 @@ echo "test,data,sample" > "$test_file" echo "1,2,3" >> "$test_file" echo "4,5,6" >> "$test_file" +# generate slug for the file document + +slug=$(uuidgen | tr '[:upper:]' '[:lower:]') + # upload file with explicit media type: text/plain file_doc=$(create-file.sh \ --f "$AGENT_CERT_FILE" \ --p "$AGENT_CERT_PWD" \ --b "$END_USER_BASE_URL" \ ---title "Test File for Media Type Update" \ ---file "$test_file" \ ---file-content-type "text/plain") + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test File for Media Type Update" \ + --slug "$slug" \ + --file "$test_file" \ + --file-content-type "text/plain") # get the file resource URI and initial dct:format @@ -56,32 +61,18 @@ if [[ ! 
"$initial_format" =~ text/plain ]]; then exit 1 fi -# re-upload the same file to the same document with different explicit media type: text/csv -# using PUT with RDF/POST multipart format -# IMPORTANT: Include explicit dct:format in RDF to simulate user editing the format field in the form - -rdf_post="" -rdf_post+="-F \"rdf=\"\n" -rdf_post+="-F \"sb=file\"\n" -rdf_post+="-F \"pu=http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#fileName\"\n" -rdf_post+="-F \"ol=@${test_file};type=text/csv\"\n" -rdf_post+="-F \"pu=http://purl.org/dc/terms/title\"\n" -rdf_post+="-F \"ol=Test File for Media Type Update\"\n" -rdf_post+="-F \"pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n" -rdf_post+="-F \"ou=http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#FileDataObject\"\n" -rdf_post+="-F \"pu=http://purl.org/dc/terms/format\"\n" -rdf_post+="-F \"ou=http://www.sparontologies.net/mediatype/text/csv\"\n" -rdf_post+="-F \"su=${file_doc}\"\n" -rdf_post+="-F \"pu=http://purl.org/dc/terms/title\"\n" -rdf_post+="-F \"ol=Test File for Media Type Update\"\n" -rdf_post+="-F \"pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n" -rdf_post+="-F \"ou=https://www.w3.org/ns/ldt/document-hierarchy#Item\"\n" -rdf_post+="-F \"pu=http://xmlns.com/foaf/0.1/primaryTopic\"\n" -rdf_post+="-F \"ob=file\"\n" -rdf_post+="-F \"pu=http://rdfs.org/sioc/ns#has_container\"\n" -rdf_post+="-F \"ou=${END_USER_BASE_URL}files/\"\n" - -echo -e "$rdf_post" | curl -f -v -s -k -X PUT -H "Accept: text/turtle" -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" -o /dev/null --config - "$file_doc" +# re-upload the same file with same slug but different explicit media type: text/csv +# this simulates editing the file document through the UI and uploading a new file + +create-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test File for Media Type Update" \ + --slug "$slug" \ + --file "$test_file" \ + --file-content-type "text/csv" \ + > /dev/null # get updated document diff --git a/http-tests/imports/PUT-file-format-browser.sh b/http-tests/imports/PUT-file-format.sh similarity index 62% rename from http-tests/imports/PUT-file-format-browser.sh rename to http-tests/imports/PUT-file-format.sh index dbc20e42c..4a30ad9d6 100755 --- a/http-tests/imports/PUT-file-format-browser.sh +++ b/http-tests/imports/PUT-file-format.sh @@ -24,14 +24,19 @@ echo "test,data,sample" > "$test_file" echo "1,2,3" >> "$test_file" echo "4,5,6" >> "$test_file" +# generate slug for the file document + +slug=$(uuidgen | tr '[:upper:]' '[:lower:]') + # upload file WITHOUT explicit media type (rely on browser detection via `file -b --mime-type`) file_doc=$(create-file.sh \ --f "$AGENT_CERT_FILE" \ --p "$AGENT_CERT_PWD" \ --b "$END_USER_BASE_URL" \ ---title "Test File for Browser Media Type" \ ---file "$test_file") + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test File for Browser Media Type" \ + --slug "$slug" \ + --file "$test_file") # get the file resource URI and initial dct:format @@ -49,30 +54,18 @@ initial_sha1=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_uri//\//\\/}> <(.*)> \./\1/p") -# re-upload the same file to the same document with explicit media type: text/csv -# using PUT with RDF/POST multipart format -# IMPORTANT: Do NOT include explicit dct:format in RDF - test fallback to bodyPart.getMediaType() - -rdf_post="" -rdf_post+="-F \"rdf=\"\n" -rdf_post+="-F \"sb=file\"\n" -rdf_post+="-F 
\"pu=http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#fileName\"\n" -rdf_post+="-F \"ol=@${test_file};type=text/csv\"\n" -rdf_post+="-F \"pu=http://purl.org/dc/terms/title\"\n" -rdf_post+="-F \"ol=Test File for Browser Media Type\"\n" -rdf_post+="-F \"pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n" -rdf_post+="-F \"ou=http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#FileDataObject\"\n" -rdf_post+="-F \"su=${file_doc}\"\n" -rdf_post+="-F \"pu=http://purl.org/dc/terms/title\"\n" -rdf_post+="-F \"ol=Test File for Browser Media Type\"\n" -rdf_post+="-F \"pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n" -rdf_post+="-F \"ou=https://www.w3.org/ns/ldt/document-hierarchy#Item\"\n" -rdf_post+="-F \"pu=http://xmlns.com/foaf/0.1/primaryTopic\"\n" -rdf_post+="-F \"ob=file\"\n" -rdf_post+="-F \"pu=http://rdfs.org/sioc/ns#has_container\"\n" -rdf_post+="-F \"ou=${END_USER_BASE_URL}files/\"\n" - -echo -e "$rdf_post" | curl -f -v -s -k -X PUT -H "Accept: text/turtle" -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" -o /dev/null --config - "$file_doc" +# re-upload the same file with same slug but WITH explicit media type: text/csv +# this simulates editing and uploading with a corrected format after browser auto-detection was wrong + +create-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test File for Browser Media Type" \ + --slug "$slug" \ + --file "$test_file" \ + --file-content-type "text/csv" \ + > /dev/null # get updated document From 31d433a2d87a5d4421f20c71786f87a49b89d53d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Fri, 23 Jan 2026 11:16:58 +0100 Subject: [PATCH 05/13] Added missing class constant --- src/main/java/com/atomgraph/linkeddatahub/vocabulary/ACL.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/main/java/com/atomgraph/linkeddatahub/vocabulary/ACL.java b/src/main/java/com/atomgraph/linkeddatahub/vocabulary/ACL.java index 1a528e187..259d34a0e 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/vocabulary/ACL.java +++ b/src/main/java/com/atomgraph/linkeddatahub/vocabulary/ACL.java @@ -60,6 +60,9 @@ public static String getURI() /** acl:Append access mode */ public static final OntClass Append = m_model.createClass( NS + "Append" ); + /** acl:Control access mode */ + public static final OntClass Control = m_model.createClass( NS + "Control" ); + /** acl:AuthenticatedAgent class */ public static final OntClass AuthenticatedAgent = m_model.createClass( NS + "AuthenticatedAgent" ); From 20fe469e307d27e3baf837e3f0858b5502b725c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Sat, 24 Jan 2026 11:46:39 +0100 Subject: [PATCH 06/13] Ontology from upload deadlock test --- .../ontology-import-upload-no-deadlock.sh | 154 ++++++++++++++++++ .../admin/model/test-ontology-import.ttl | 17 ++ 2 files changed, 171 insertions(+) create mode 100755 http-tests/admin/model/ontology-import-upload-no-deadlock.sh create mode 100644 http-tests/admin/model/test-ontology-import.ttl diff --git a/http-tests/admin/model/ontology-import-upload-no-deadlock.sh b/http-tests/admin/model/ontology-import-upload-no-deadlock.sh new file mode 100755 index 000000000..af69d213f --- /dev/null +++ b/http-tests/admin/model/ontology-import-upload-no-deadlock.sh @@ -0,0 +1,154 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Test that ontology imports of uploaded files do not cause deadlock +# This verifies the fix for circular dependency when: +# 1. Request arrives for /uploads/xyz +# 2. 
OntologyFilter intercepts it and loads ontology +# 3. Ontology has owl:imports for /uploads/xyz +# 4. Jena FileManager makes HTTP request to /uploads/xyz +# 5. Would cause infinite loop/deadlock without the fix + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +pwd=$(realpath "$PWD") + +# add agent to the writers group so they can upload files + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + +# Step 1: Upload an RDF file + +file_content_type="text/turtle" + +file_doc=$(create-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test ontology for upload import" \ + --file "$pwd/test-ontology-import.ttl" \ + --file-content-type "${file_content_type}") + +# Step 2: Extract the uploaded file URI (content-addressed) + +file_doc_ntriples=$(get.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + --accept 'application/n-triples' \ + "$file_doc") + +upload_uri=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p") + +echo "Uploaded file URI: $upload_uri" + +# Verify the uploaded file is accessible before we add it as an import +curl -k -f -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -H "Accept: ${file_content_type}" \ + "$upload_uri" > /dev/null + +echo "Upload file is accessible" + +# Step 3: Add the uploaded file as an owl:import to the namespace ontology + +namespace_doc="${END_USER_BASE_URL}ns" +namespace="${namespace_doc}#" +ontology_doc="${ADMIN_BASE_URL}ontologies/namespace/" + +add-ontology-import.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --import "$upload_uri" \ + "$ontology_doc" + +echo "Added owl:import of uploaded file to namespace ontology" + +# Step 4: Clear the namespace ontology from memory to force reload on next request + +clear-ontology.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + -b "$ADMIN_BASE_URL" \ + --ontology "$namespace" + +echo "Cleared ontology cache to force reload" + +# Step 5: Make a request that triggers ontology loading +# This would cause a deadlock without the OntologyFilter fix +# Use portable timeout implementation (works on both macOS and Linux) + +echo "Making request to trigger ontology loading (testing for deadlock)..." + +# Portable timeout function - works on both macOS and Linux +request_pid="" +( + curl -k -f -s \ + -E "$OWNER_CERT_FILE":"$OWNER_CERT_PWD" \ + -H "Accept: application/n-triples" \ + "$namespace_doc" > /dev/null +) & +request_pid=$! + +# Wait up to 30 seconds for the request to complete +timeout_seconds=30 +elapsed=0 +while kill -0 "$request_pid" 2>/dev/null; do + if [ $elapsed -ge $timeout_seconds ]; then + kill -9 "$request_pid" 2>/dev/null || true + echo "ERROR: Request timed out after ${timeout_seconds} seconds - deadlock detected!" + exit 1 + fi + sleep 1 + ((elapsed++)) +done + +# Check if curl succeeded +wait "$request_pid" +curl_exit_code=$? 
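+# a non-zero exit status means the request completed but the server returned an error, as opposed to the timeout/deadlock case handled above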
+if [ $curl_exit_code -ne 0 ]; then + echo "ERROR: Request failed with exit code $curl_exit_code" + exit 1 +fi + +echo "Request completed successfully in ${elapsed}s (no deadlock)" + +# Step 6: Verify the import is present in the loaded ontology + +curl -k -f -s \ + -H "Accept: application/n-triples" \ + "$namespace_doc" \ +| grep "<${namespace}> <${upload_uri}>" > /dev/null + +echo "Verified owl:import is present in namespace ontology" + +# Step 7: Verify the uploaded file is still accessible after ontology loading + +curl -k -f -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -H "Accept: ${file_content_type}" \ + "$upload_uri" > /dev/null + +echo "Uploaded file is still accessible after ontology import" + +# Step 8: Verify that the imported ontology content is accessible via the namespace document +# This confirms the import was actually loaded (not just skipped) + +curl -k -f -s \ + -G \ + -E "$OWNER_CERT_FILE":"$OWNER_CERT_PWD" \ + -H 'Accept: application/sparql-results+xml' \ + --data-urlencode "query=SELECT * { ?p ?o }" \ + "$namespace_doc" \ +| grep 'Test Class' > /dev/null + +echo "Verified imported ontology content is accessible via SPARQL" + +echo "✓ All tests passed - no deadlock detected when importing uploaded files in ontology" diff --git a/http-tests/admin/model/test-ontology-import.ttl b/http-tests/admin/model/test-ontology-import.ttl new file mode 100644 index 000000000..24361b035 --- /dev/null +++ b/http-tests/admin/model/test-ontology-import.ttl @@ -0,0 +1,17 @@ +@prefix : . +@prefix owl: . +@prefix rdfs: . +@prefix xsd: . + +: a owl:Ontology ; + rdfs:label "Test ontology for upload import" ; + rdfs:comment "This ontology is uploaded to test that ontology imports of uploaded files do not cause deadlock" . + +:TestClass a owl:Class ; + rdfs:label "Test Class" ; + rdfs:comment "A test class to verify ontology was loaded" . + +:testProperty a owl:DatatypeProperty ; + rdfs:label "Test Property" ; + rdfs:domain :TestClass ; + rdfs:range xsd:string . 
From 7f4b8f419d3d31cc482d60a07cd788c84312e149 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Sun, 25 Jan 2026 14:58:47 +0100 Subject: [PATCH 07/13] Ignored paths in `OntologyFilter` (#269) * Ignored paths in `OntologyFilter` * Debug test * Debug test * Debug test * Removed debug output * Debug test * Debug test * Removed debug output --- .../ontology-import-upload-no-deadlock.sh | 62 ++----------------- .../server/filter/request/OntologyFilter.java | 31 ++++++++++ 2 files changed, 35 insertions(+), 58 deletions(-) diff --git a/http-tests/admin/model/ontology-import-upload-no-deadlock.sh b/http-tests/admin/model/ontology-import-upload-no-deadlock.sh index af69d213f..939da9687 100755 --- a/http-tests/admin/model/ontology-import-upload-no-deadlock.sh +++ b/http-tests/admin/model/ontology-import-upload-no-deadlock.sh @@ -47,16 +47,12 @@ file_doc_ntriples=$(get.sh \ upload_uri=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p") -echo "Uploaded file URI: $upload_uri" - # Verify the uploaded file is accessible before we add it as an import curl -k -f -s \ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ -H "Accept: ${file_content_type}" \ "$upload_uri" > /dev/null -echo "Upload file is accessible" - # Step 3: Add the uploaded file as an owl:import to the namespace ontology namespace_doc="${END_USER_BASE_URL}ns" @@ -69,8 +65,6 @@ add-ontology-import.sh \ --import "$upload_uri" \ "$ontology_doc" -echo "Added owl:import of uploaded file to namespace ontology" - # Step 4: Clear the namespace ontology from memory to force reload on next request clear-ontology.sh \ @@ -79,66 +73,22 @@ clear-ontology.sh \ -b "$ADMIN_BASE_URL" \ --ontology "$namespace" -echo "Cleared ontology cache to force reload" - -# Step 5: Make a request that triggers ontology loading -# This would cause a deadlock without the OntologyFilter fix -# Use portable timeout implementation (works on both macOS and Linux) - -echo "Making request to trigger ontology loading (testing for deadlock)..." - -# Portable timeout function - works on both macOS and Linux -request_pid="" -( - curl -k -f -s \ - -E "$OWNER_CERT_FILE":"$OWNER_CERT_PWD" \ - -H "Accept: application/n-triples" \ - "$namespace_doc" > /dev/null -) & -request_pid=$! - -# Wait up to 30 seconds for the request to complete -timeout_seconds=30 -elapsed=0 -while kill -0 "$request_pid" 2>/dev/null; do - if [ $elapsed -ge $timeout_seconds ]; then - kill -9 "$request_pid" 2>/dev/null || true - echo "ERROR: Request timed out after ${timeout_seconds} seconds - deadlock detected!" - exit 1 - fi - sleep 1 - ((elapsed++)) -done - -# Check if curl succeeded -wait "$request_pid" -curl_exit_code=$? 
-if [ $curl_exit_code -ne 0 ]; then - echo "ERROR: Request failed with exit code $curl_exit_code" - exit 1 -fi - -echo "Request completed successfully in ${elapsed}s (no deadlock)" - -# Step 6: Verify the import is present in the loaded ontology +# Step 5: Verify the import is present in the loaded ontology +# This request also triggers ontology loading and would detect deadlock curl -k -f -s \ -H "Accept: application/n-triples" \ "$namespace_doc" \ | grep "<${namespace}> <${upload_uri}>" > /dev/null -echo "Verified owl:import is present in namespace ontology" - -# Step 7: Verify the uploaded file is still accessible after ontology loading +# Step 6: Verify the uploaded file is still accessible after ontology loading curl -k -f -s \ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ -H "Accept: ${file_content_type}" \ "$upload_uri" > /dev/null -echo "Uploaded file is still accessible after ontology import" - -# Step 8: Verify that the imported ontology content is accessible via the namespace document +# Step 7: Verify that the imported ontology content is accessible via the namespace document # This confirms the import was actually loaded (not just skipped) curl -k -f -s \ @@ -148,7 +98,3 @@ curl -k -f -s \ --data-urlencode "query=SELECT * { ?p ?o }" \ "$namespace_doc" \ | grep 'Test Class' > /dev/null - -echo "Verified imported ontology content is accessible via SPARQL" - -echo "✓ All tests passed - no deadlock detected when importing uploaded files in ontology" diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java index c996d5214..0390a989b 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java +++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java @@ -54,12 +54,43 @@ public class OntologyFilter implements ContainerRequestFilter private static final Logger log = LoggerFactory.getLogger(OntologyFilter.class); + /** + * Paths that should not trigger ontology loading to avoid circular dependencies. + * + * When an ontology contains owl:imports pointing to URIs within these paths, + * loading the ontology would trigger HTTP requests to those URIs. If those requests + * are intercepted by this filter, it creates a circular dependency: + * + * 1. Request arrives for /uploads/xyz + * 2. OntologyFilter intercepts it and loads ontology + * 3. Ontology has owl:imports for /uploads/xyz + * 4. Jena FileManager makes HTTP request to /uploads/xyz + * 5. OntologyFilter intercepts it again → infinite loop/deadlock + * + * Additionally, uploaded files are binary/RDF content that don't require + * ontology context for their serving logic. 
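+ *
+ * Matching uses String#startsWith on the request path returned by UriInfo#getPath(),
+ * which is relative to the application base URI, hence the prefixes carry no leading slash.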
+ */ + private static final java.util.Set IGNORED_PATH_PREFIXES = java.util.Set.of( + "uploads/" + ); + @Inject com.atomgraph.linkeddatahub.Application system; @Override public void filter(ContainerRequestContext crc) throws IOException { + String path = crc.getUriInfo().getPath(); + + // Skip ontology loading for paths that may be referenced in owl:imports + // to prevent circular dependency deadlocks during ontology resolution + if (IGNORED_PATH_PREFIXES.stream().anyMatch(path::startsWith)) + { + if (log.isTraceEnabled()) log.trace("Skipping ontology loading for path: {}", path); + crc.setProperty(OWL.Ontology.getURI(), Optional.empty()); + return; + } + crc.setProperty(OWL.Ontology.getURI(), getOntology(crc)); } From d08f55217b534cc40cc4f3f4b2b75dd8f7ab3dd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Fri, 13 Feb 2026 14:02:15 +0100 Subject: [PATCH 08/13] Class-based navigation (#270) * Class tree feature * Class-based navigation * IXSL fixes * Navigation fixes * Modal view mode handler * Fixed modal view modes * `ldh:DocTreeActivateHref` param fix * `ldh:LeftSidebar` initialization * `ldh:LeftSidebar` fixes * Class list CSS * Dynamic View fixes * View template refactoring * Fixed template matches * Virew progress bar fixes * Removed unused function * File/import test fixes * Chart rendering aligned with view Using `ldh:update-progress-counter()` to update the progress bar * `VALUES` injection fix Fixed CLI script name ambiguity * CSS fix * Debug output cleanup --- .../create-query.sh => add-construct.sh} | 58 +- bin/{imports/create-file.sh => add-file.sh} | 67 +- ...create-csv-import.sh => add-csv-import.sh} | 50 +- ...create-rdf-import.sh => add-rdf-import.sh} | 50 +- bin/imports/import-csv.sh | 66 +- bin/imports/import-rdf.sh | 82 +- .../ontology-import-upload-no-deadlock.sh | 26 +- http-tests/imports/GET-file-304.sh | 1 + http-tests/imports/GET-file-range.sh | 32 +- http-tests/imports/GET-file-sha1sum.sh | 39 +- .../imports/PUT-file-format-explicit.sh | 31 +- http-tests/imports/PUT-file-format.sh | 30 +- http-tests/imports/create-file.sh | 34 +- platform/datasets/admin.trig | 282 ------ platform/datasets/end-user.trig | 282 ------ .../atomgraph/linkeddatahub/Application.java | 2 - .../linkeddatahub/resource/Generate.java | 33 +- .../com/atomgraph/linkeddatahub/ldh.ttl | 22 + .../atomgraph/linkeddatahub/css/bootstrap.css | 22 +- .../xsl/bootstrap/2.3.2/client/block.xsl | 109 ++- .../bootstrap/2.3.2/client/block/chart.xsl | 54 +- .../bootstrap/2.3.2/client/block/object.xsl | 21 +- .../bootstrap/2.3.2/client/block/query.xsl | 17 +- .../xsl/bootstrap/2.3.2/client/block/view.xsl | 619 +++++++----- .../xsl/bootstrap/2.3.2/client/functions.xsl | 4 +- .../xsl/bootstrap/2.3.2/client/map.xsl | 37 +- .../xsl/bootstrap/2.3.2/client/modal.xsl | 10 +- .../xsl/bootstrap/2.3.2/client/navigation.xsl | 908 ++++++++++++++++-- .../2.3.2/client/query-transforms.xsl | 32 +- .../xsl/bootstrap/2.3.2/layout.xsl | 17 +- .../xsl/bootstrap/2.3.2/resource.xsl | 2 +- .../xsl/bootstrap/2.3.2/translations.rdf | 4 + .../atomgraph/linkeddatahub/xsl/client.xsl | 73 +- 33 files changed, 1796 insertions(+), 1320 deletions(-) rename bin/{imports/create-query.sh => add-construct.sh} (72%) rename bin/{imports/create-file.sh => add-file.sh} (61%) rename bin/imports/{create-csv-import.sh => add-csv-import.sh} (73%) rename bin/imports/{create-rdf-import.sh => add-rdf-import.sh} (73%) diff --git a/bin/imports/create-query.sh b/bin/add-construct.sh similarity index 72% rename from 
bin/imports/create-query.sh rename to bin/add-construct.sh index f9d793498..5549b25ab 100755 --- a/bin/imports/create-query.sh +++ b/bin/add-construct.sh @@ -5,7 +5,7 @@ print_usage() { printf "Creates a SPARQL CONSTRUCT query.\n" printf "\n" - printf "Usage: %s options\n" "$0" + printf "Usage: %s options TARGET_URI\n" "$0" printf "\n" printf "Options:\n" printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" @@ -13,20 +13,16 @@ print_usage() printf " -b, --base BASE_URI Base URI of the application\n" printf " --proxy PROXY_URL The host this request will be proxied through (optional)\n" printf "\n" - printf " --title TITLE Title of the chart\n" - printf " --description DESCRIPTION Description of the chart (optional)\n" - printf " --slug STRING String that will be used as URI path segment (optional)\n" + printf " --title TITLE Title of the query\n" + printf " --description DESCRIPTION Description of the query (optional)\n" + printf " --uri URI URI of the query (optional)\n" printf "\n" printf " --query-file ABS_PATH Absolute path to the text file with the SPARQL query string\n" + printf " --service SERVICE_URI URI of the SPARQL service specific to this query (optional)\n" } hash turtle 2>/dev/null || { echo >&2 "turtle not on \$PATH. Aborting."; exit 1; } -urlencode() { - python -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], sys.argv[2]))' \ - "$1" "$urlencode_safe" -} - args=() while [[ $# -gt 0 ]] do @@ -63,8 +59,8 @@ do shift # past argument shift # past value ;; - --slug) - slug="$2" + --uri) + uri="$2" shift # past argument shift # past value ;; @@ -73,6 +69,11 @@ do shift # past argument shift # past value ;; + --service) + service="$2" + shift # past argument + shift # past value + ;; *) # unknown arguments args+=("$1") # save it in an array for later shift # past argument @@ -81,6 +82,8 @@ do done set -- "${args[@]}" # restore args +target="$1" + if [ -z "$cert_pem_file" ] ; then print_usage exit 1 @@ -102,43 +105,38 @@ if [ -z "$query_file" ] ; then exit 1 fi -if [ -z "$slug" ] ; then - slug=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase -fi -encoded_slug=$(urlencode "$slug") - -container="${base}queries/" query=$(<"$query_file") # read query string from file -target="${container}${encoded_slug}/" - args+=("-f") args+=("$cert_pem_file") args+=("-p") args+=("$cert_password") args+=("-t") args+=("text/turtle") # content type -args+=("$target") if [ -n "$proxy" ]; then args+=("--proxy") args+=("$proxy") fi +if [ -n "$uri" ] ; then + subject="<${uri}>" +else + subject="_:subject" +fi + turtle+="@prefix ldh: .\n" -turtle+="@prefix dh: .\n" turtle+="@prefix dct: .\n" -turtle+="@prefix foaf: .\n" turtle+="@prefix sp: .\n" -turtle+="_:query a sp:Construct .\n" -turtle+="_:query dct:title \"${title}\" .\n" -turtle+="_:query sp:text \"\"\"${query}\"\"\" .\n" -turtle+="<${target}> a dh:Item .\n" -turtle+="<${target}> foaf:primaryTopic _:query .\n" -turtle+="<${target}> dct:title \"${title}\" .\n" +turtle+="${subject} a sp:Construct .\n" +turtle+="${subject} dct:title \"${title}\" .\n" +turtle+="${subject} sp:text \"\"\"${query}\"\"\" .\n" +if [ -n "$service" ] ; then + turtle+="${subject} ldh:service <${service}> .\n" +fi if [ -n "$description" ] ; then - turtle+="_:query dct:description \"${description}\" .\n" + turtle+="${subject} dct:description \"${description}\" .\n" fi # submit Turtle doc to the server -echo -e "$turtle" | turtle --base="$target" | put.sh "${args[@]}" \ No newline at end of file +echo -e "$turtle" | turtle 
--base="$target" | post.sh "${args[@]}" \ No newline at end of file diff --git a/bin/imports/create-file.sh b/bin/add-file.sh similarity index 61% rename from bin/imports/create-file.sh rename to bin/add-file.sh index 36413d34c..ecc6104b1 100755 --- a/bin/imports/create-file.sh +++ b/bin/add-file.sh @@ -5,7 +5,7 @@ print_usage() { printf "Uploads a file.\n" printf "\n" - printf "Usage: %s options\n" "$0" + printf "Usage: %s options TARGET_URI\n" "$0" printf "\n" printf "Options:\n" printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" @@ -14,22 +14,14 @@ print_usage() printf " --proxy PROXY_URL The host this request will be proxied through (optional)\n" printf "\n" printf " --title TITLE Title of the file\n" - printf " --container CONTAINER_URI URI of the parent container (optional)\n" printf " --description DESCRIPTION Description of the file (optional)\n" - printf " --slug STRING String that will be used as URI path segment (optional)\n" printf "\n" printf " --file ABS_PATH Absolute path to the file\n" printf " --file-content-type MEDIA_TYPE Media type of the file (optional)\n" - #printf " --file-slug STRING String that will be used as the file's URI path segment (optional)\n" } hash curl 2>/dev/null || { echo >&2 "curl not on \$PATH. Aborting."; exit 1; } -urlencode() { - python -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], sys.argv[2]))' \ - "$1" "$urlencode_safe" -} - args=() while [[ $# -gt 0 ]] do @@ -66,16 +58,6 @@ do shift # past argument shift # past value ;; - --slug) - slug="$2" - shift # past argument - shift # past value - ;; - --container) - container="$2" - shift # past argument - shift # past value - ;; --file) file="$2" shift # past argument @@ -86,11 +68,6 @@ do shift # past argument shift # past value ;; - --file-slug) - file_slug="$2" - shift # past argument - shift # past value - ;; *) # unknown arguments args+=("$1") # save it in an array for later shift # past argument @@ -99,6 +76,8 @@ do done set -- "${args[@]}" # restore args +target="$1" + if [ -z "$cert_pem_file" ] ; then print_usage exit 1 @@ -124,23 +103,6 @@ if [ -z "$file_content_type" ] ; then file_content_type=$(file -b --mime-type "$file") fi -if [ -z "$slug" ] ; then - slug=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase -fi -encoded_slug=$(urlencode "$slug") - -# need to create explicit file URI since that is what this script returns (not the graph URI) - -#if [ -z "$file_slug" ] ; then -# file_slug=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase -#fi - -if [ -z "$container" ] ; then - container="${base}files/" -fi - -target="${container}${encoded_slug}/" - # https://stackoverflow.com/questions/19116016/what-is-the-right-way-to-post-multipart-form-data-using-curl rdf_post+="-F \"rdf=\"\n" @@ -151,18 +113,8 @@ rdf_post+="-F \"pu=http://purl.org/dc/terms/title\"\n" rdf_post+="-F \"ol=${title}\"\n" rdf_post+="-F \"pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n" rdf_post+="-F \"ou=http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#FileDataObject\"\n" -rdf_post+="-F \"su=${target}\"\n" -rdf_post+="-F \"pu=http://purl.org/dc/terms/title\"\n" -rdf_post+="-F \"ol=${title}\"\n" -rdf_post+="-F \"pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n" -rdf_post+="-F \"ou=https://www.w3.org/ns/ldt/document-hierarchy#Item\"\n" -rdf_post+="-F \"pu=http://xmlns.com/foaf/0.1/primaryTopic\"\n" -rdf_post+="-F \"ob=file\"\n" -rdf_post+="-F \"pu=http://rdfs.org/sioc/ns#has_container\"\n" -rdf_post+="-F \"ou=${container}\"\n" if [ -n 
"$description" ] ; then - rdf_post+="-F \"sb=file\"\n" rdf_post+="-F \"pu=http://purl.org/dc/terms/description\"\n" rdf_post+="-F \"ol=${description}\"\n" fi @@ -176,14 +128,5 @@ if [ -n "$proxy" ]; then target="${target/$target_host/$proxy_host}" fi -# POST RDF/POST multipart form and capture the effective URL -effective_url=$(echo -e "$rdf_post" | curl -w '%{url_effective}' -f -v -s -k -X PUT -H "Accept: text/turtle" -E "$cert_pem_file":"$cert_password" -o /dev/null --config - "$target") - -# If using proxy, rewrite the effective URL back to original hostname -if [ -n "$proxy" ]; then - # Replace proxy host with original host in the effective URL - rewritten_url="${effective_url/$proxy_host/$target_host}" - echo "$rewritten_url" -else - echo "$effective_url" -fi +# POST RDF/POST multipart form +echo -e "$rdf_post" | curl -f -v -s -k -X POST -H "Accept: text/turtle" -E "$cert_pem_file":"$cert_password" -o /dev/null --config - "$target" diff --git a/bin/imports/create-csv-import.sh b/bin/imports/add-csv-import.sh similarity index 73% rename from bin/imports/create-csv-import.sh rename to bin/imports/add-csv-import.sh index f7edac6cd..5b01392b1 100755 --- a/bin/imports/create-csv-import.sh +++ b/bin/imports/add-csv-import.sh @@ -5,7 +5,7 @@ print_usage() { printf "Transforms CSV data into RDF using a SPARQL query and imports it.\n" printf "\n" - printf "Usage: %s options\n" "$0" + printf "Usage: %s options TARGET_URI\n" "$0" printf "\n" printf "Options:\n" printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" @@ -13,9 +13,9 @@ print_usage() printf " -b, --base BASE_URI Base URI of the application\n" printf " --proxy PROXY_URL The host this request will be proxied through (optional)\n" printf "\n" - printf " --title TITLE Title of the container\n" - printf " --description DESCRIPTION Description of the container (optional)\n" - printf " --slug STRING String that will be used as URI path segment (optional)\n" + printf " --title TITLE Title of the import\n" + printf " --description DESCRIPTION Description of the import (optional)\n" + printf " --uri URI URI of the import resource (optional)\n" printf "\n" printf " --query QUERY_URI URI of the CONSTRUCT mapping query\n" printf " --file FILE_URI URI of the CSV file\n" @@ -24,11 +24,6 @@ print_usage() hash turtle 2>/dev/null || { echo >&2 "turtle not on \$PATH. 
Aborting."; exit 1; } -urlencode() { - python -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], sys.argv[2]))' \ - "$1" "$urlencode_safe" -} - args=() while [[ $# -gt 0 ]] do @@ -65,8 +60,8 @@ do shift # past argument shift # past value ;; - --slug) - slug="$2" + --uri) + uri="$2" shift # past argument shift # past value ;; @@ -93,6 +88,8 @@ do done set -- "${args[@]}" # restore args +target="$1" + if [ -z "$cert_pem_file" ] ; then print_usage exit 1 @@ -122,14 +119,11 @@ if [ -z "$delimiter" ] ; then exit 1 fi -if [ -z "$slug" ] ; then - slug=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase +if [ -n "$uri" ] ; then + subject="<${uri}>" +else + subject="_:import" fi -encoded_slug=$(urlencode "$slug") - -container="${base}imports/" - -target="${container}${encoded_slug}/" args+=("-f") args+=("$cert_pem_file") @@ -137,29 +131,23 @@ args+=("-p") args+=("$cert_password") args+=("-t") args+=("text/turtle") # content type -args+=("$target") if [ -n "$proxy" ]; then args+=("--proxy") args+=("$proxy") fi turtle+="@prefix ldh: .\n" -turtle+="@prefix dh: .\n" turtle+="@prefix dct: .\n" -turtle+="@prefix foaf: .\n" turtle+="@prefix spin: .\n" -turtle+="_:import a ldh:CSVImport .\n" -turtle+="_:import dct:title \"${title}\" .\n" -turtle+="_:import spin:query <${query}> .\n" -turtle+="_:import ldh:file <${file}> .\n" -turtle+="_:import ldh:delimiter \"${delimiter}\" .\n" -turtle+="<${target}> a dh:Item .\n" -turtle+="<${target}> foaf:primaryTopic _:import .\n" -turtle+="<${target}> dct:title \"${title}\" .\n" +turtle+="${subject} a ldh:CSVImport .\n" +turtle+="${subject} dct:title \"${title}\" .\n" +turtle+="${subject} spin:query <${query}> .\n" +turtle+="${subject} ldh:file <${file}> .\n" +turtle+="${subject} ldh:delimiter \"${delimiter}\" .\n" if [ -n "$description" ] ; then - turtle+="_:import dct:description \"${description}\" .\n" + turtle+="${subject} dct:description \"${description}\" .\n" fi # submit Turtle doc to the server -echo -e "$turtle" | turtle --base="$target" | put.sh "${args[@]}" \ No newline at end of file +echo -e "$turtle" | turtle --base="$target" | post.sh "${args[@]}" \ No newline at end of file diff --git a/bin/imports/create-rdf-import.sh b/bin/imports/add-rdf-import.sh similarity index 73% rename from bin/imports/create-rdf-import.sh rename to bin/imports/add-rdf-import.sh index 8d76b5e48..c47e68011 100755 --- a/bin/imports/create-rdf-import.sh +++ b/bin/imports/add-rdf-import.sh @@ -5,7 +5,7 @@ print_usage() { printf "Imports RDF data.\n" printf "\n" - printf "Usage: %s options\n" "$0" + printf "Usage: %s options TARGET_URI\n" "$0" printf "\n" printf "Options:\n" printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" @@ -13,9 +13,9 @@ print_usage() printf " -b, --base BASE_URI Base URI of the application\n" printf " --proxy PROXY_URL The host this request will be proxied through (optional)\n" printf "\n" - printf " --title TITLE Title of the container\n" - printf " --description DESCRIPTION Description of the container (optional)\n" - printf " --slug STRING String that will be used as URI path segment (optional)\n" + printf " --title TITLE Title of the import\n" + printf " --description DESCRIPTION Description of the import (optional)\n" + printf " --uri URI URI of the import resource (optional)\n" printf "\n" printf " --query QUERY_URI URI of the CONSTRUCT mapping query (optional)\n" printf " --graph GRAPH_URI URI of the graph (optional)\n" @@ -24,11 +24,6 @@ print_usage() hash turtle 2>/dev/null || { echo >&2 
"turtle not on \$PATH. Aborting."; exit 1; } -urlencode() { - python -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], sys.argv[2]))' \ - "$1" "$urlencode_safe" -} - args=() while [[ $# -gt 0 ]] do @@ -65,8 +60,8 @@ do shift # past argument shift # past value ;; - --slug) - slug="$2" + --uri) + uri="$2" shift # past argument shift # past value ;; @@ -93,6 +88,8 @@ do done set -- "${args[@]}" # restore args +target="$1" + if [ -z "$cert_pem_file" ] ; then print_usage exit 1 @@ -114,14 +111,11 @@ if [ -z "$file" ] ; then exit 1 fi -if [ -z "$slug" ] ; then - slug=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase +if [ -n "$uri" ] ; then + subject="<${uri}>" +else + subject="_:import" fi -encoded_slug=$(urlencode "$slug") - -container="${base}imports/" - -target="${container}${encoded_slug}/" args+=("-f") args+=("$cert_pem_file") @@ -129,34 +123,28 @@ args+=("-p") args+=("$cert_password") args+=("-t") args+=("text/turtle") # content type -args+=("$target") if [ -n "$proxy" ]; then args+=("--proxy") args+=("$proxy") fi turtle+="@prefix ldh: .\n" -turtle+="@prefix dh: .\n" turtle+="@prefix dct: .\n" -turtle+="@prefix foaf: .\n" -turtle+="_:import a ldh:RDFImport .\n" -turtle+="_:import dct:title \"${title}\" .\n" -turtle+="_:import ldh:file <${file}> .\n" -turtle+="<${target}> a dh:Item .\n" -turtle+="<${target}> foaf:primaryTopic _:import .\n" -turtle+="<${target}> dct:title \"${title}\" .\n" +turtle+="${subject} a ldh:RDFImport .\n" +turtle+="${subject} dct:title \"${title}\" .\n" +turtle+="${subject} ldh:file <${file}> .\n" if [ -n "$graph" ] ; then turtle+="@prefix sd: .\n" - turtle+="_:import sd:name <${graph}> .\n" + turtle+="${subject} sd:name <${graph}> .\n" fi if [ -n "$query" ] ; then turtle+="@prefix spin: .\n" - turtle+="_:import spin:query <${query}> .\n" + turtle+="${subject} spin:query <${query}> .\n" fi if [ -n "$description" ] ; then - turtle+="_:import dct:description \"${description}\" .\n" + turtle+="${subject} dct:description \"${description}\" .\n" fi # submit Turtle doc to the server -echo -e "$turtle" | turtle --base="$target" | put.sh "${args[@]}" \ No newline at end of file +echo -e "$turtle" | turtle --base="$target" | post.sh "${args[@]}" \ No newline at end of file diff --git a/bin/imports/import-csv.sh b/bin/imports/import-csv.sh index d7c55dd38..5ebde7b7b 100755 --- a/bin/imports/import-csv.sh +++ b/bin/imports/import-csv.sh @@ -139,55 +139,73 @@ if [ -z "$proxy" ] ; then proxy="$base" fi -query_doc=$(create-query.sh \ +# Generate query ID for fragment identifier +query_id=$(uuidgen | tr '[:upper:]' '[:lower:]') + +# Create the imports/ container first (ignore error if it already exists) +create-container.sh \ -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ - --title "$title" \ - --slug "$query_doc_slug" \ - --query-file "$query_file" -) + --title "Imports" \ + --parent "$base" \ + --slug "imports" 2>/dev/null || true -query_ntriples=$(get.sh \ +# Create the import item document +import_doc=$(create-item.sh \ + -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ - --accept 'application/n-triples' \ - "$query_doc" + --title "$title" \ + --container "${base}imports/" \ + --slug "$query_doc_slug" ) -query=$(echo "$query_ntriples" | sed -rn "s/<${query_doc//\//\\/}> <(.*)> \./\1/p" | head -1) - -file_doc=$(create-file.sh \ +# Add the CONSTRUCT query to the item using fragment identifier +# TO-DO: fix ambigous add-construct.sh script names +"$(dirname "$0")/../add-construct.sh" \ -b "$base" \ -f 
"$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --slug "$file_doc_slug" \ - --file-slug "$file_slug" \ - --file "$file" \ - --file-content-type "text/csv" -) + --uri "#${query_id}" \ + --query-file "$query_file" \ + "$import_doc" + +# The query URI is the document with fragment +query="${import_doc}#${query_id}" -file_ntriples=$(get.sh \ +# Add the file to the import item +add-file.sh \ + -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ - --accept 'application/n-triples' \ - "$file_doc") + --title "$title" \ + --file "$file" \ + --file-content-type "text/csv" \ + "$import_doc" + +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$file" | awk '{print $1}') +file_uri="${base}uploads/${sha1sum}" -file=$(echo "$file_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p" | head -1) +# Generate import ID for fragment identifier +import_id=$(uuidgen | tr '[:upper:]' '[:lower:]') -create-csv-import.sh \ +# Add the import metadata to the import item using fragment identifier +add-csv-import.sh \ -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --slug "$import_slug" \ + --uri "#${import_id}" \ --query "$query" \ - --file "$file" \ - --delimiter "$delimiter" + --file "$file_uri" \ + --delimiter "$delimiter" \ + "$import_doc" \ No newline at end of file diff --git a/bin/imports/import-rdf.sh b/bin/imports/import-rdf.sh index 086d4d303..cdf398ac5 100755 --- a/bin/imports/import-rdf.sh +++ b/bin/imports/import-rdf.sh @@ -142,68 +142,86 @@ if [ -z "$proxy" ] ; then proxy="$base" fi +# Create the imports/ container first +create-container.sh \ + -b "$base" \ + -f "$cert_pem_file" \ + -p "$cert_password" \ + --proxy "$proxy" \ + --title "Imports" \ + --parent "$base" \ + --slug "imports" + +# Create the import item document +import_doc=$(create-item.sh \ + -b "$base" \ + -f "$cert_pem_file" \ + -p "$cert_password" \ + --proxy "$proxy" \ + --title "$title" \ + --container "${base}imports/" \ + --slug "$query_doc_slug" +) + if [ -n "$query_file" ] ; then - query_doc=$(create-query.sh \ + # Generate query ID for fragment identifier + query_id=$(uuidgen | tr '[:upper:]' '[:lower:]') + + # Add the CONSTRUCT query to the item using fragment identifier + # TO-DO: fix ambigous add-construct.sh script names + "$(dirname "$0")/../add-construct.sh" \ -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --slug "$query_doc_slug" \ - --query-file "$query_file" - ) - - query_ntriples=$(get.sh \ - -f "$cert_pem_file" \ - -p "$cert_password" \ - --proxy "$proxy" \ - --accept 'application/n-triples' \ - "$query_doc" - ) + --uri "#${query_id}" \ + --query-file "$query_file" \ + "$import_doc" - query=$(echo "$query_ntriples" | sed -rn "s/<${query_doc//\//\\/}> <(.*)> \./\1/p" | head -1) + # The query URI is the document with fragment + query="${import_doc}#${query_id}" fi -file_doc=$(create-file.sh \ +# Add the file to the import item +add-file.sh \ -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --slug "$file_doc_slug" \ - --file-slug "$file_slug" \ --file "$file" \ - --file-content-type "$file_content_type" -) + --file-content-type "$file_content_type" \ + "$import_doc" -file_ntriples=$(get.sh \ - -f "$cert_pem_file" \ - -p "$cert_password" \ - --proxy "$proxy" \ - --accept 'application/n-triples' \ - "$file_doc" -) +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$file" | awk '{print $1}') 
+file_uri="${base}uploads/${sha1sum}" -file=$(echo "$file_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p" | head -1) +# Generate import ID for fragment identifier +import_id=$(uuidgen | tr '[:upper:]' '[:lower:]') +# Add the import metadata to the import item using fragment identifier if [ -n "$query" ] ; then - create-rdf-import.sh \ + add-rdf-import.sh \ -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --slug "$import_slug" \ + --uri "#${import_id}" \ --query "$query" \ - --file "$file" + --file "$file_uri" \ + "$import_doc" else - create-rdf-import.sh \ + add-rdf-import.sh \ -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --slug "$import_slug" \ + --uri "#${import_id}" \ --graph "$graph" \ - --file "$file" + --file "$file_uri" \ + "$import_doc" fi \ No newline at end of file diff --git a/http-tests/admin/model/ontology-import-upload-no-deadlock.sh b/http-tests/admin/model/ontology-import-upload-no-deadlock.sh index 939da9687..7079f86d5 100755 --- a/http-tests/admin/model/ontology-import-upload-no-deadlock.sh +++ b/http-tests/admin/model/ontology-import-upload-no-deadlock.sh @@ -28,24 +28,32 @@ add-agent-to-group.sh \ # Step 1: Upload an RDF file file_content_type="text/turtle" +slug=$(uuidgen | tr '[:upper:]' '[:lower:]') -file_doc=$(create-file.sh \ +# Create an item document to hold the file +file_doc=$(create-item.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ -b "$END_USER_BASE_URL" \ --title "Test ontology for upload import" \ - --file "$pwd/test-ontology-import.ttl" \ - --file-content-type "${file_content_type}") - -# Step 2: Extract the uploaded file URI (content-addressed) + --container "$END_USER_BASE_URL" \ + --slug "$slug") -file_doc_ntriples=$(get.sh \ +# Add the file to the document +add-file.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ - --accept 'application/n-triples' \ - "$file_doc") + -b "$END_USER_BASE_URL" \ + --title "Test ontology for upload import" \ + --file "$pwd/test-ontology-import.ttl" \ + --file-content-type "${file_content_type}" \ + "$file_doc" + +# Step 2: Extract the uploaded file URI (content-addressed) -upload_uri=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p") +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$pwd/test-ontology-import.ttl" | awk '{print $1}') +upload_uri="${END_USER_BASE_URL}uploads/${sha1sum}" # Verify the uploaded file is accessible before we add it as an import curl -k -f -s \ diff --git a/http-tests/imports/GET-file-304.sh b/http-tests/imports/GET-file-304.sh index 8b4f3728c..1f38581f7 100755 --- a/http-tests/imports/GET-file-304.sh +++ b/http-tests/imports/GET-file-304.sh @@ -7,6 +7,7 @@ purge_cache "$END_USER_VARNISH_SERVICE" purge_cache "$ADMIN_VARNISH_SERVICE" purge_cache "$FRONTEND_VARNISH_SERVICE" +# Run the create-file test and capture the file URI it outputs file=$(./create-file.sh) etag=$( diff --git a/http-tests/imports/GET-file-range.sh b/http-tests/imports/GET-file-range.sh index 649215916..e7eceb0a8 100755 --- a/http-tests/imports/GET-file-range.sh +++ b/http-tests/imports/GET-file-range.sh @@ -22,22 +22,30 @@ add-agent-to-group.sh \ filename="/tmp/random-file" time dd if=/dev/urandom of="$filename" bs=1 count=1024 file_content_type="application/octet-stream" +slug=$(uuidgen | tr '[:upper:]' '[:lower:]') -file_doc=$(create-file.sh \ --f "$AGENT_CERT_FILE" \ --p "$AGENT_CERT_PWD" \ --b "$END_USER_BASE_URL" \ ---title "Random file" \ ---file "$filename" \ 
---file-content-type "${file_content_type}") - -file_doc_ntriples=$(get.sh \ +# Create an item document to hold the file +file_doc=$(create-item.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ - --accept 'application/n-triples' \ - "$file_doc") + -b "$END_USER_BASE_URL" \ + --title "Random file" \ + --container "$END_USER_BASE_URL" \ + --slug "$slug") -file=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p") +# Add the file to the document +add-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Random file" \ + --file "$filename" \ + --file-content-type "${file_content_type}" \ + "$file_doc" + +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$filename" | awk '{print $1}') +file="${END_USER_BASE_URL}uploads/${sha1sum}" from=100 length=42 diff --git a/http-tests/imports/GET-file-sha1sum.sh b/http-tests/imports/GET-file-sha1sum.sh index 3384ffc4a..08a0bd3fb 100755 --- a/http-tests/imports/GET-file-sha1sum.sh +++ b/http-tests/imports/GET-file-sha1sum.sh @@ -23,21 +23,36 @@ filename="/tmp/random-file" time dd if=/dev/urandom of="$filename" bs=1 count=1024 file_content_type="application/octet-stream" -file_doc=$(create-file.sh \ --f "$AGENT_CERT_FILE" \ --p "$AGENT_CERT_PWD" \ --b "$END_USER_BASE_URL" \ ---title "Random file" \ ---file "$filename" \ ---file-content-type "${file_content_type}") - -file_doc_ntriples=$(get.sh \ +# Create a container for files first +create-container.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ - --accept 'application/n-triples' \ - "$file_doc") + -b "$END_USER_BASE_URL" \ + --title "Files" \ + --parent "$END_USER_BASE_URL" \ + --slug "files" -file=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p") +# Create an item document to hold the file +file_doc=$(create-item.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Random file" \ + --container "${END_USER_BASE_URL}files/") + +# Add the file to the document +add-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Random file" \ + --file "$filename" \ + --file-content-type "${file_content_type}" \ + "$file_doc" + +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$filename" | awk '{print $1}') +file="${END_USER_BASE_URL}uploads/${sha1sum}" server_sha1sum=$(echo "$file" | cut -d "/" -f 5) # cut the last URL path segment diff --git a/http-tests/imports/PUT-file-format-explicit.sh b/http-tests/imports/PUT-file-format-explicit.sh index 3c9dffd8b..3d5b3d38c 100755 --- a/http-tests/imports/PUT-file-format-explicit.sh +++ b/http-tests/imports/PUT-file-format-explicit.sh @@ -24,20 +24,30 @@ echo "test,data,sample" > "$test_file" echo "1,2,3" >> "$test_file" echo "4,5,6" >> "$test_file" -# generate slug for the file document - slug=$(uuidgen | tr '[:upper:]' '[:lower:]') -# upload file with explicit media type: text/plain +# Create an item document to hold the file +file_doc=$(create-item.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test File for Media Type Update" \ + --container "$END_USER_BASE_URL" \ + --slug "$slug") -file_doc=$(create-file.sh \ +# upload file with explicit media type: text/plain +add-file.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ -b "$END_USER_BASE_URL" \ --title "Test File for Media Type Update" \ - --slug "$slug" \ --file "$test_file" \ - --file-content-type "text/plain") + --file-content-type "text/plain" \ + 
"$file_doc" + +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$test_file" | awk '{print $1}') +file_uri="${END_USER_BASE_URL}uploads/${sha1sum}" # get the file resource URI and initial dct:format @@ -47,8 +57,6 @@ file_doc_ntriples=$(get.sh \ --accept 'application/n-triples' \ "$file_doc") -file_uri=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p") - # get initial SHA1 hash initial_sha1=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_uri//\//\\/}> \"(.*)\" \./\1/p") @@ -61,18 +69,17 @@ if [[ ! "$initial_format" =~ text/plain ]]; then exit 1 fi -# re-upload the same file with same slug but different explicit media type: text/csv +# re-upload the same file but different explicit media type: text/csv # this simulates editing the file document through the UI and uploading a new file -create-file.sh \ +add-file.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ -b "$END_USER_BASE_URL" \ --title "Test File for Media Type Update" \ - --slug "$slug" \ --file "$test_file" \ --file-content-type "text/csv" \ - > /dev/null + "$file_doc" # get updated document diff --git a/http-tests/imports/PUT-file-format.sh b/http-tests/imports/PUT-file-format.sh index 4a30ad9d6..fa503fbac 100755 --- a/http-tests/imports/PUT-file-format.sh +++ b/http-tests/imports/PUT-file-format.sh @@ -24,19 +24,29 @@ echo "test,data,sample" > "$test_file" echo "1,2,3" >> "$test_file" echo "4,5,6" >> "$test_file" -# generate slug for the file document - slug=$(uuidgen | tr '[:upper:]' '[:lower:]') -# upload file WITHOUT explicit media type (rely on browser detection via `file -b --mime-type`) +# Create an item document to hold the file +file_doc=$(create-item.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test File for Browser Media Type" \ + --container "$END_USER_BASE_URL" \ + --slug "$slug") -file_doc=$(create-file.sh \ +# upload file WITHOUT explicit media type (rely on browser detection via `file -b --mime-type`) +add-file.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ -b "$END_USER_BASE_URL" \ --title "Test File for Browser Media Type" \ - --slug "$slug" \ - --file "$test_file") + --file "$test_file" \ + "$file_doc" + +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$test_file" | awk '{print $1}') +file_uri="${END_USER_BASE_URL}uploads/${sha1sum}" # get the file resource URI and initial dct:format @@ -46,25 +56,23 @@ file_doc_ntriples=$(get.sh \ --accept 'application/n-triples' \ "$file_doc") -file_uri=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p") - # get initial SHA1 hash initial_sha1=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_uri//\//\\/}> \"(.*)\" \./\1/p") # get initial dct:format (should be browser-detected) initial_format=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_uri//\//\\/}> <(.*)> \./\1/p") -# re-upload the same file with same slug but WITH explicit media type: text/csv +# re-upload the same file but WITH explicit media type: text/csv # this simulates editing and uploading with a corrected format after browser auto-detection was wrong -create-file.sh \ +add-file.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ -b "$END_USER_BASE_URL" \ --title "Test File for Browser Media Type" \ - --slug "$slug" \ --file "$test_file" \ --file-content-type "text/csv" \ + "$file_doc" \ > /dev/null # get updated document diff --git a/http-tests/imports/create-file.sh b/http-tests/imports/create-file.sh index d7e5c462c..a054bb1af 100755 --- a/http-tests/imports/create-file.sh +++ 
b/http-tests/imports/create-file.sh @@ -20,24 +20,30 @@ add-agent-to-group.sh \ # create file file_content_type="text/csv" +slug=$(uuidgen | tr '[:upper:]' '[:lower:]') -file_doc=$(create-file.sh \ --f "$AGENT_CERT_FILE" \ --p "$AGENT_CERT_PWD" \ --b "$END_USER_BASE_URL" \ ---title "Test CSV" \ ---file "$pwd/test.csv" \ ---file-content-type "${file_content_type}") - -file_doc_ntriples=$(get.sh \ +# Create an item document to hold the file +file_doc=$(create-item.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ - --accept 'application/n-triples' \ - "$file_doc") - -# echo "FILE NTRIPLES: $file_doc_ntriples" + -b "$END_USER_BASE_URL" \ + --title "Test CSV" \ + --container "$END_USER_BASE_URL" \ + --slug "$slug") -file=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p") +# Add the file to the document +add-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test CSV" \ + --file "$pwd/test.csv" \ + --file-content-type "${file_content_type}" \ + "$file_doc" + +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$pwd/test.csv" | awk '{print $1}') +file="${END_USER_BASE_URL}uploads/${sha1sum}" echo "$file" # file URL used in other tests diff --git a/platform/datasets/admin.trig b/platform/datasets/admin.trig index 4756fa90b..07eac47c0 100644 --- a/platform/datasets/admin.trig +++ b/platform/datasets/admin.trig @@ -9,7 +9,6 @@ @prefix sioc: . @prefix foaf: . @prefix dct: . -@prefix spin: . <> { @@ -58,287 +57,6 @@ } -# CONTAINERS - - -{ - - a dh:Container ; - dct:title "Queries" ; - dct:description "SPARQL queries" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Queries" ; - spin:query . - - a sp:Select ; - dct:title "Select query resources" ; - sp:text """PREFIX sp: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { { ?s a sp:Select } - UNION - { ?s a sp:Construct } - UNION - { ?s a sp:Describe } - UNION - { ?s a sp:Ask } - } - }""" . - -} - - -{ - - a dh:Item ; - sioc:has_container ; - dct:title "Select instances" ; - foaf:primaryTopic . - - a sp:Select ; - dct:title "Select instances" ; - dct:description "Selects instances of type from the default graph" ; - sp:text """SELECT DISTINCT ?s -WHERE - { ?s a $type ; - ?p ?o - }""" . - -} - - -{ - - a dh:Item ; - sioc:has_container ; - dct:title "Select instances in graphs" ; - foaf:primaryTopic . - - a sp:Select ; - dct:title "Select instances in graphs" ; - dct:description "Selects instances of type from named graphs" ; - sp:text """SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a $type ; - ?p ?o - } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Files" ; - dct:description "Uploaded files" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Files" ; - spin:query . - - a sp:Select ; - dct:title "Select file resources" ; - sp:text """PREFIX nfo: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a nfo:FileDataObject } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Imports" ; - dct:description "Data imports" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Imports" ; - spin:query . - - a sp:Select ; - dct:title "Select import resources" ; - sp:text """PREFIX ldh: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { { ?s a ldh:CSVImport } - UNION - { ?s a ldh:RDFImport } - } - }""" . - -} - - -{ - - a dh:Item ; - dct:title "Geo" ; - dct:description "Geolocated resources" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . 
- - a ldh:View ; - dct:title "Geo resources" ; - spin:query ; - ac:mode ac:MapMode . - - a sp:Select ; - dct:title "Select geo resources" ; - sp:text """PREFIX geo: -PREFIX dct: - -SELECT DISTINCT ?resource -WHERE -{ GRAPH ?graph - { ?resource geo:lat ?lat ; - geo:long ?long - OPTIONAL - { ?resource a ?type } - OPTIONAL - { ?resource dct:title ?title } - } -} -ORDER BY ?title""" . - -} - - -{ - - a dh:Item ; - dct:title "Latest" ; - dct:description "Latest resources" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Latest resources" ; - spin:query . - - a sp:Select ; - dct:title "Select latest" ; - sp:text """PREFIX dct: - -SELECT DISTINCT ?dated -WHERE -{ GRAPH ?graph - { ?dated dct:created ?created } -} -ORDER BY DESC(?created)""" . - -} - - -{ - - a dh:Container ; - dct:title "Charts" ; - dct:description "Saved charts" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Charts" ; - spin:query . - - a sp:Select ; - dct:title "Select chart resources" ; - sp:text """PREFIX ldh: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { { ?s a ldh:GraphChart } - UNION - { ?s a ldh:ResultSetChart } - } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Apps" ; - dct:description "Linked Data applications" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Applications" ; - spin:query . - - a sp:Select ; - dct:title "Select application resources" ; - sp:text """PREFIX lapp: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a lapp:Application } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Services" ; - dct:description "SPARQL services" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Services" ; - spin:query . - - a sp:Select ; - dct:title "Select service resources" ; - sp:text """PREFIX sd: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a sd:Service } - }""" . - -} - ### ADMIN-SPECIFIC @prefix lacl: . diff --git a/platform/datasets/end-user.trig b/platform/datasets/end-user.trig index 2608b6a39..65c624610 100644 --- a/platform/datasets/end-user.trig +++ b/platform/datasets/end-user.trig @@ -9,7 +9,6 @@ @prefix sioc: . @prefix foaf: . @prefix dct: . -@prefix spin: . <> { @@ -58,287 +57,6 @@ } -# CONTAINERS - - -{ - - a dh:Container ; - dct:title "Queries" ; - dct:description "SPARQL queries" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Queries" ; - spin:query . - - a sp:Select ; - dct:title "Select query resources" ; - sp:text """PREFIX sp: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { { ?s a sp:Select } - UNION - { ?s a sp:Construct } - UNION - { ?s a sp:Describe } - UNION - { ?s a sp:Ask } - } - }""" . - -} - - -{ - - a dh:Item ; - sioc:has_container ; - dct:title "Select instances" ; - foaf:primaryTopic . - - a sp:Select ; - dct:title "Select instances" ; - dct:description "Selects instances of type from the default graph" ; - sp:text """SELECT DISTINCT ?s -WHERE - { ?s a $type ; - ?p ?o - }""" . - -} - - -{ - - a dh:Item ; - sioc:has_container ; - dct:title "Select instances in graphs" ; - foaf:primaryTopic . - - a sp:Select ; - dct:title "Select instances in graphs" ; - dct:description "Selects instances of type from named graphs" ; - sp:text """SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a $type ; - ?p ?o - } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Files" ; - dct:description "Uploaded files" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Files" ; - spin:query . 
- - a sp:Select ; - dct:title "Select file resources" ; - sp:text """PREFIX nfo: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a nfo:FileDataObject } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Imports" ; - dct:description "Data imports" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Imports" ; - spin:query . - - a sp:Select ; - dct:title "Select import resources" ; - sp:text """PREFIX ldh: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { { ?s a ldh:CSVImport } - UNION - { ?s a ldh:RDFImport } - } - }""" . - -} - - -{ - - a dh:Item ; - dct:title "Geo" ; - dct:description "Geolocated resources" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Geo resources" ; - spin:query ; - ac:mode ac:MapMode . - - a sp:Select ; - dct:title "Select geo resources" ; - sp:text """PREFIX geo: -PREFIX dct: - -SELECT DISTINCT ?resource -WHERE -{ GRAPH ?graph - { ?resource geo:lat ?lat ; - geo:long ?long - OPTIONAL - { ?resource a ?type } - OPTIONAL - { ?resource dct:title ?title } - } -} -ORDER BY ?title""" . - -} - - -{ - - a dh:Item ; - dct:title "Latest" ; - dct:description "Latest resources" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Latest resources" ; - spin:query . - - a sp:Select ; - dct:title "Select latest" ; - sp:text """PREFIX dct: - -SELECT DISTINCT ?dated -WHERE -{ GRAPH ?graph - { ?dated dct:created ?created } -} -ORDER BY DESC(?created)""" . - -} - - -{ - - a dh:Container ; - dct:title "Charts" ; - dct:description "Saved charts" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Charts" ; - spin:query . - - a sp:Select ; - dct:title "Select chart resources" ; - sp:text """PREFIX ldh: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { { ?s a ldh:GraphChart } - UNION - { ?s a ldh:ResultSetChart } - } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Apps" ; - dct:description "Linked Data applications" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Applications" ; - spin:query . - - a sp:Select ; - dct:title "Select application resources" ; - sp:text """PREFIX lapp: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a lapp:Application } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Services" ; - dct:description "SPARQL services" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Services" ; - spin:query . - - a sp:Select ; - dct:title "Select service resources" ; - sp:text """PREFIX sd: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a sd:Service } - }""" . 
- -} - ### END-USER-SPECIFIC diff --git a/src/main/java/com/atomgraph/linkeddatahub/Application.java b/src/main/java/com/atomgraph/linkeddatahub/Application.java index 0a5851110..ac7c6dba8 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/Application.java +++ b/src/main/java/com/atomgraph/linkeddatahub/Application.java @@ -16,7 +16,6 @@ */ package com.atomgraph.linkeddatahub; -import com.atomgraph.linkeddatahub.server.mapper.ResourceExistsExceptionMapper; import com.atomgraph.linkeddatahub.server.mapper.HttpHostConnectExceptionMapper; import com.atomgraph.linkeddatahub.server.mapper.InternalURLExceptionMapper; import com.atomgraph.linkeddatahub.server.mapper.MessagingExceptionMapper; @@ -1104,7 +1103,6 @@ protected void registerExceptionMappers() register(WebIDDelegationExceptionMapper.class); register(WebIDLoadingExceptionMapper.class); register(TokenExpiredExceptionMapper.class); - register(ResourceExistsExceptionMapper.class); register(QueryParseExceptionMapper.class); register(AuthenticationExceptionMapper.class); register(ForbiddenExceptionMapper.class); diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/Generate.java b/src/main/java/com/atomgraph/linkeddatahub/resource/Generate.java index 716289439..cecd10dd6 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/Generate.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/Generate.java @@ -18,8 +18,6 @@ import com.atomgraph.core.MediaTypes; import com.atomgraph.linkeddatahub.apps.model.Application; -import com.atomgraph.linkeddatahub.client.GraphStoreClient; -import com.atomgraph.linkeddatahub.imports.QueryLoader; import com.atomgraph.linkeddatahub.server.model.impl.DirectGraphStoreImpl; import com.atomgraph.linkeddatahub.server.security.AgentContext; import com.atomgraph.linkeddatahub.server.util.Skolemizer; @@ -44,8 +42,10 @@ import jakarta.ws.rs.core.Response.Status; import jakarta.ws.rs.core.UriBuilder; import jakarta.ws.rs.core.UriInfo; +import org.apache.jena.ontology.Ontology; import org.apache.jena.query.ParameterizedSparqlString; import org.apache.jena.query.Query; +import org.apache.jena.query.QueryFactory; import org.apache.jena.query.Syntax; import org.apache.jena.rdf.model.Model; import org.apache.jena.rdf.model.ModelFactory; @@ -69,10 +69,11 @@ public class Generate private final UriInfo uriInfo; private final MediaTypes mediaTypes; private final Application application; + private final Ontology ontology; private final Optional agentContext; private final com.atomgraph.linkeddatahub.Application system; private final ResourceContext resourceContext; - + /** * Constructs endpoint for container generation. 
* @@ -80,18 +81,21 @@ public class Generate * @param uriInfo current URI info * @param mediaTypes supported media types * @param application matched application + * @param ontology ontology of the current application * @param system system application * @param agentContext authenticated agent's context * @param resourceContext resource context for creating resources */ @Inject public Generate(@Context Request request, @Context UriInfo uriInfo, MediaTypes mediaTypes, - com.atomgraph.linkeddatahub.apps.model.Application application, Optional agentContext, + com.atomgraph.linkeddatahub.apps.model.Application application, Optional ontology, Optional agentContext, com.atomgraph.linkeddatahub.Application system, @Context ResourceContext resourceContext) { + if (ontology.isEmpty()) throw new InternalServerErrorException("Ontology is not specified"); this.uriInfo = uriInfo; this.mediaTypes = mediaTypes; this.application = application; + this.ontology = ontology.get(); this.agentContext = agentContext; this.system = system; this.resourceContext = resourceContext; @@ -129,10 +133,13 @@ public Response post(Model model) Resource queryRes = part.getPropertyResourceValue(SPIN.query); if (queryRes == null) throw new BadRequestException("Container query string (spin:query) not provided"); - GraphStoreClient gsc = GraphStoreClient.create(getSystem().getClient(), getSystem().getMediaTypes()). - delegation(getUriInfo().getBaseUri(), getAgentContext().orElse(null)); - QueryLoader queryLoader = new QueryLoader(URI.create(queryRes.getURI()), getApplication().getBase().getURI(), Syntax.syntaxARQ, gsc); - Query query = queryLoader.get(); + // Lookup query in ontology + Resource queryResource = getOntology().getOntModel().getResource(queryRes.getURI()); + if (queryResource == null || !queryResource.hasProperty(SP.text)) + throw new BadRequestException("Query resource not found in ontology: " + queryRes.getURI()); + + String queryString = queryResource.getProperty(SP.text).getString(); + Query query = QueryFactory.create(queryString, Syntax.syntaxARQ); if (!query.isSelectType()) throw new BadRequestException("Container query is not of SELECT type"); ParameterizedSparqlString pss = new ParameterizedSparqlString(query.toString()); @@ -253,6 +260,16 @@ public Application getApplication() return application; } + /** + * Returns the ontology. + * + * @return the ontology + */ + public Ontology getOntology() + { + return ontology; + } + /** * Returns the current URI info. * diff --git a/src/main/resources/com/atomgraph/linkeddatahub/ldh.ttl b/src/main/resources/com/atomgraph/linkeddatahub/ldh.ttl index 589ae75b9..caa46a07f 100644 --- a/src/main/resources/com/atomgraph/linkeddatahub/ldh.ttl +++ b/src/main/resources/com/atomgraph/linkeddatahub/ldh.ttl @@ -493,6 +493,28 @@ ORDER BY ?title """ ; rdfs:isDefinedBy : . +:SelectInstances a sp:Select ; + rdfs:label "Select instances" ; + dct:description "Selects instances of type from the default graph" ; + sp:text """SELECT DISTINCT ?s +WHERE + { ?s a $type ; + ?p ?o + }""" ; + rdfs:isDefinedBy : . + +:SelectInstancesInGraphs a sp:Select ; + rdfs:label "Select instances in graphs" ; + dct:description "Selects instances of type from named graphs" ; + sp:text """SELECT DISTINCT ?s +WHERE + { GRAPH ?g + { ?s a $type ; + ?p ?o + } + }""" ; + rdfs:isDefinedBy : . 
+ :ChildrenView a :View ; rdfs:label "Children view" ; spin:query :SelectChildren ; diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/css/bootstrap.css b/src/main/webapp/static/com/atomgraph/linkeddatahub/css/bootstrap.css index 66155e480..fcfc0fb23 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/css/bootstrap.css +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/css/bootstrap.css @@ -37,6 +37,8 @@ button.btn.create-action { height: 30px; } a.external::after { content: "⤴"; padding-left: 0.2em; } a.btn.create-action { height: 20px; } .create-resource .btn.create-action { margin-top: 1em; } +.btn-class { background: inherit; } +.btn-class span { color: black; } .btn-group.open .btn.dropdown-toggle.create-action { background-image: url('../icons/ic_note_add_black_24px.svg'); } li button.btn-edit-constructors, li button.btn-add-data, li button.btn-add-ontology, li button.btn-generate-containers { text-align: left; width: 100%; background-color: inherit; } .btn-container { background-image: url('../icons/folder.svg'); } @@ -48,8 +50,6 @@ li button.btn-edit-constructors, li button.btn-add-data, li button.btn-add-ontol .btn-import { background-image: url('../icons/ic_transform_black_24px.svg'); } .btn-chart { background-image: url('../icons/ic_show_chart_black_24px.svg'); } .btn-view { background-image: url('../icons/ic_view_list_black_24px.svg'); } -.btn-latest { background-image: url('../icons/ic_new_releases_black_24px.svg'); } -.btn-geo { background-image: url('../icons/ic_location_on_black_24px.svg'); } .btn-logo { background-position: left; background-repeat: no-repeat; padding-left: 32px; } .dropdown-menu .btn-logo { background-position: 12px center; padding-left: 40px; } .btn.btn-toggle-content { font-size: 0; color: transparent; background-image: url('../icons/baseline-expand_less-24px.svg'); background-position: center center; background-repeat: no-repeat; width: 48px; } @@ -82,17 +82,21 @@ li button.btn-edit-constructors, li button.btn-add-data, li button.btn-add-ontol .dropdown-menu > li > a.btn-list { background-image: url('../icons/view_list_black_24dp.svg'); background-position: 12px center; background-repeat: no-repeat; padding: 5px 5px 5px 40px; } .dropdown-menu > li > a.btn-table { background-image: url('../icons/ic_border_all_black_24px.svg'); background-position: 12px center; background-repeat: no-repeat; padding: 5px 5px 5px 40px; } .dropdown-menu > li > a.btn-grid { background-image: url('../icons/ic_grid_on_black_24px.svg'); background-position: 12px center; background-repeat: no-repeat; padding: 5px 5px 5px 40px; } -#doc-tree { display: none; width: 15%; position: fixed; left: 0; top: 106px; height: calc(100% - 106px); } +#left-sidebar { display: none; width: 15%; position: fixed; left: 0; top: 106px; height: calc(100% - 106px); } @media (max-width: 979px) { body { padding-top: 0; } - #doc-tree { display: block; width: auto; position: unset; top: unset; height: auto; } - #doc-tree .nav { max-height: 20em; overflow: auto; } + #left-sidebar { display: block; width: auto; position: unset; top: unset; height: auto; } + #left-sidebar .nav { max-height: 20em; overflow: auto; } } -#doc-tree .nav-list > li > a { margin-left: 0; margin-right: 0; } -#doc-tree .nav-list > li > a.btn-container, #doc-tree .nav-list > li > a.btn-app, #doc-tree .nav-list > li > a.btn-chart, #doc-tree .nav-list > li > a.btn-file, #doc-tree .nav-list > li > a.btn-geo, #doc-tree .nav-list > li > a.btn-import, #doc-tree .nav-list > li > a.btn-latest, #doc-tree .nav-list 
> li > a.btn-query, #doc-tree .nav-list > li > a.btn-service { padding-left: 24px; } -#doc-tree li { max-height: 20em; overflow: auto; } -#doc-tree li > a { display: inline-block; } +#left-sidebar .nav-list > li > a.btn-container { padding-left: 24px; } +#left-sidebar .nav-list > li > a { margin-left: 0; margin-right: 0; } +#left-sidebar .nav-list > li > a.btn-container +#left-sidebar li { max-height: 20em; overflow: auto; } +#left-sidebar li > a { display: inline-block; } +#left-sidebar .btn-latest { background-image: url('../icons/ic_new_releases_black_24px.svg'); background-color: inherit; } +#left-sidebar .btn-geo { background-image: url('../icons/ic_location_on_black_24px.svg'); background-color: inherit; } + .btn.btn-expand-tree { height: 24px; width: 24px; background-image: url('../icons/expand_more_black_24dp.svg'); } .btn.btn-expand-tree:hover, .btn.btn-expand-tree:focus { background-position: 0 0; } .btn.btn-expanded-tree { height: 24px; width: 24px; background-image: url('../icons/chevron_right_black_24dp.svg'); } diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block.xsl index 1060d7ba2..745d5aab7 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block.xsl @@ -151,28 +151,31 @@ exclude-result-prefixes="#all" - - - - - - - - - - + - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + @@ -454,7 +457,71 @@ exclude-result-prefixes="#all" - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/chart.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/chart.xsl index 5541a334b..53307ebbc 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/chart.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/chart.xsl @@ -286,11 +286,16 @@ exclude-result-prefixes="#all" - - - - - + + + + + + + + + + - - - - + + - - - - - - - @@ -816,8 +813,7 @@ exclude-result-prefixes="#all" - - + - @@ -867,11 +862,10 @@ exclude-result-prefixes="#all" - - - + + - + @@ -880,7 +874,10 @@ exclude-result-prefixes="#all" - + + + + @@ -896,8 +893,7 @@ exclude-result-prefixes="#all" - - + - ldh:block-object-metadata-response + ldh:block-object-metadata-response $block/@about: + + @@ -302,18 +304,29 @@ exclude-result-prefixes="#all" + - + + + + + + + + + + - - + + + diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/query.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/query.xsl index 550630534..fc2c10194 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/query.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/query.xsl @@ -280,6 +280,12 @@ exclude-result-prefixes="#all" + + + + + + @@ -325,7 +331,7 @@ exclude-result-prefixes="#all" - + @@ -536,7 +542,7 @@ exclude-result-prefixes="#all" - + @@ -581,11 +587,10 @@ exclude-result-prefixes="#all" - - - + + - + diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/view.xsl 
b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/view.xsl index 3ebcc4766..e5d3cd2db 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/view.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/view.xsl @@ -69,11 +69,15 @@ exclude-result-prefixes="#all" - - - - - + + + + + + + + + @@ -86,9 +90,13 @@ exclude-result-prefixes="#all" 'container': $container, 'mode': $mode, 'refresh-content': $refresh-content, - 'query-uri': $query-uri + 'query-uri': $query-uri, + 'cache': ixsl:get(ixsl:get(ixsl:window(), 'LinkedDataHub.contents'), '`' || $block/@about || '`') }"/> - + + + + - + ldh:view-results-thunk + + + + ldh:load-object-metadata + @@ -154,8 +167,12 @@ exclude-result-prefixes="#all" - + + + + + @@ -177,7 +194,6 @@ exclude-result-prefixes="#all" - - + + ldh:set-object-metadata + + + + @@ -208,7 +229,7 @@ exclude-result-prefixes="#all" - + @@ -331,6 +352,7 @@ exclude-result-prefixes="#all" + @@ -365,8 +387,13 @@ exclude-result-prefixes="#all" map { 'request': $request, 'container': ., - 'count-var-name': $count-var-name + 'count-var-name': $count-var-name, + 'cache': $cache }"/> + + + + - @@ -495,11 +521,8 @@ exclude-result-prefixes="#all" + - - - - @@ -522,7 +545,6 @@ exclude-result-prefixes="#all" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - @@ -544,7 +606,7 @@ exclude-result-prefixes="#all" - + @@ -575,13 +637,27 @@ exclude-result-prefixes="#all" + + + + + + + + + + + + + + - - + + @@ -591,7 +667,7 @@ exclude-result-prefixes="#all" - + @@ -681,7 +757,6 @@ exclude-result-prefixes="#all" - @@ -694,13 +769,13 @@ exclude-result-prefixes="#all" + - - + $initial-load: @@ -769,6 +844,7 @@ exclude-result-prefixes="#all" + @@ -785,8 +861,10 @@ exclude-result-prefixes="#all" 'container': id($order-by-container-id, ixsl:page()), 'id': $id, 'predicate': $predicate, - 'order-by-predicate': $order-by-predicate + 'order-by-predicate': $order-by-predicate, + 'cache': $cache }"/> + - - - - - @@ -809,7 +882,7 @@ exclude-result-prefixes="#all" - + @@ -1113,46 +1186,114 @@ exclude-result-prefixes="#all" - - + + BLOCK DELEGATION: view-mode handler triggered + - - - + BLOCK DELEGATION: block URI = + + BLOCK DELEGATION: cache found: + + + + + + + + + + BLOCK DELEGATION: pager previous triggered + + + BLOCK DELEGATION: block URI = + + BLOCK DELEGATION: cache found: + + + + + + + + + + BLOCK DELEGATION: pager next triggered + + + BLOCK DELEGATION: block URI = + + BLOCK DELEGATION: cache found: + + + + + + + + + + BLOCK DELEGATION: container-order triggered + + + BLOCK DELEGATION: block URI = + + BLOCK DELEGATION: cache found: + + + + + + + + + + + BLOCK DELEGATION: btn-order-by triggered + + + BLOCK DELEGATION: block URI = + + BLOCK DELEGATION: cache found: + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + - + + - - - - - - - + + + + + + + + - - - - - - - - - - - - - - + + + + - - - - + + + + - + - + + - - - - - - - + + + + + + + + + - - - - - - - - - - - - - - + + + + + - - - - + + + + - + - + + - - - - - - - + + + + + + + + + + - - - - - - - - - - - - - - + + + + - - - + + + - + - + + + + + - - - - - - - - - + + + + + + - - - - - - - + + + + - - - - - - - - - + + + + + + + + - + - + - + + - - - - + - + @@ -1371,8 +1486,8 @@ exclude-result-prefixes="#all" - - + + @@ -1395,9 +1510,19 @@ exclude-result-prefixes="#all" - - - + + + + + + + + + + + + + @@ -1459,7 +1584,7 @@ exclude-result-prefixes="#all" - + @@ 
-1469,12 +1594,11 @@ exclude-result-prefixes="#all" - - - - - - + + + + + @@ -1487,20 +1611,21 @@ exclude-result-prefixes="#all" - + - + - + - + + - + - - - - - - + + + + + @@ -1535,20 +1659,21 @@ exclude-result-prefixes="#all" - + - + - + - + + + ldh:view-query-response @@ -1605,14 +1731,11 @@ exclude-result-prefixes="#all" - + - - - - - - + + + @@ -1630,23 +1753,25 @@ exclude-result-prefixes="#all" - - - - - + + + + + + + + + + + + + + + + + - - - - - - - - - - - + @@ -1675,7 +1800,6 @@ exclude-result-prefixes="#all" - + - - - - - + + ldh:render-view + @@ -1737,7 +1860,6 @@ exclude-result-prefixes="#all" - @@ -1750,6 +1872,7 @@ exclude-result-prefixes="#all" + @@ -1773,11 +1896,16 @@ exclude-result-prefixes="#all" - + + + + + + - + @@ -1788,7 +1916,7 @@ exclude-result-prefixes="#all" ldh:facet-filter-response - + @@ -1804,7 +1932,7 @@ exclude-result-prefixes="#all" - + @@ -1816,7 +1944,7 @@ exclude-result-prefixes="#all" ldh:parallax-response - + @@ -1872,7 +2000,7 @@ exclude-result-prefixes="#all" ldh:parallax-property-response - + @@ -1932,7 +2060,7 @@ exclude-result-prefixes="#all" ldh:facet-value-response - + @@ -2078,6 +2206,9 @@ exclude-result-prefixes="#all" ldh:result-count-response + + + @@ -2101,7 +2232,7 @@ exclude-result-prefixes="#all" - + @@ -2116,7 +2247,7 @@ exclude-result-prefixes="#all" ldh:order-by-response - + @@ -2130,7 +2261,7 @@ exclude-result-prefixes="#all" - + diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl index 5731d47c9..1b0e1f2e4 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl @@ -93,7 +93,7 @@ exclude-result-prefixes="#all" - + @@ -495,6 +495,8 @@ exclude-result-prefixes="#all" + ldh:handle-response + diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/map.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/map.xsl index 0fd72e1b5..8c2a46433 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/map.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/map.xsl @@ -122,11 +122,11 @@ exclude-result-prefixes="#all" - - - + + + @@ -142,7 +142,8 @@ exclude-result-prefixes="#all" 'request': $request, 'container': $container, 'container-id': $container-id, - 'block-uri': $block-uri + 'map': $map, + 'initial-load': $initial-load }"/> - - + + - - - + @@ -331,17 +330,21 @@ exclude-result-prefixes="#all" - - + + + + + - - + + + @@ -429,6 +432,9 @@ exclude-result-prefixes="#all" + + + diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/translations.rdf b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/translations.rdf index a7dfaff31..df130e8df 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/translations.rdf +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/translations.rdf @@ -104,6 +104,10 @@ Geo Geo + + Other + Otro + Files Archivos diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/client.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/client.xsl index 052116f39..3f0ec1505 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/client.xsl +++ 
b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/client.xsl @@ -286,7 +286,7 @@ WHERE - + @@ -323,15 +323,15 @@ WHERE - - + + - + - - - + + + @@ -481,22 +481,6 @@ WHERE - - - - - -
  • - - - - / - -
  • -
    - @@ -607,9 +591,19 @@ WHERE + + + + + + + + + - - + + + @@ -751,7 +745,7 @@ WHERE - + Application change. Base URI: @@ -842,7 +836,8 @@ WHERE - + + @@ -853,24 +848,22 @@ WHERE
    - - - - - - - - - - - + + - + - + From 1abed3abbd0438fa5137686431de30465fe728f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Sun, 15 Feb 2026 10:46:51 +0100 Subject: [PATCH 09/13] Removed debug output --- .../linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl index 1b0e1f2e4..27531e32f 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl @@ -494,9 +494,7 @@ exclude-result-prefixes="#all" - - ldh:handle-response - + From c609fe61f18778976b3eb0b59d3a568997ca12c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Sun, 15 Feb 2026 10:47:16 +0100 Subject: [PATCH 10/13] Removed unused class New CLI script --- bin/delete.sh | 71 +++++++++++++++++++ .../mapper/ResourceExistsExceptionMapper.java | 63 ---------------- 2 files changed, 71 insertions(+), 63 deletions(-) create mode 100755 bin/delete.sh delete mode 100644 src/main/java/com/atomgraph/linkeddatahub/server/mapper/ResourceExistsExceptionMapper.java diff --git a/bin/delete.sh b/bin/delete.sh new file mode 100755 index 000000000..1099bb488 --- /dev/null +++ b/bin/delete.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +print_usage() +{ + printf "Deletes an RDF document.\n" + printf "\n" + printf "Usage: %s options TARGET_URI\n" "$0" + printf "\n" + printf "Options:\n" + printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" + printf " -p, --cert-password CERT_PASSWORD Password of the WebID certificate\n" + printf " --proxy PROXY_URL The host this request will be proxied through (optional)\n" +} + +hash curl 2>/dev/null || { echo >&2 "curl not on \$PATH. 
Aborting."; exit 1; } + +unknown=() +while [[ $# -gt 0 ]] +do + key="$1" + + case $key in + -f|--cert-pem-file) + cert_pem_file="$2" + shift # past argument + shift # past value + ;; + -p|--cert-password) + cert_password="$2" + shift # past argument + shift # past value + ;; + --proxy) + proxy="$2" + shift # past argument + shift # past value + ;; + *) # unknown option + unknown+=("$1") # save it in an array for later + shift # past argument + ;; + esac +done +set -- "${unknown[@]}" # restore args + +if [ -z "$cert_pem_file" ] ; then + print_usage + exit 1 +fi +if [ -z "$cert_password" ] ; then + print_usage + exit 1 +fi +if [ "$#" -ne 1 ]; then + print_usage + exit 1 +fi + +url="$1" + +if [ -n "$proxy" ]; then + # rewrite target hostname to proxy hostname + url_host=$(echo "$url" | cut -d '/' -f 1,2,3) + proxy_host=$(echo "$proxy" | cut -d '/' -f 1,2,3) + final_url="${url/$url_host/$proxy_host}" +else + final_url="$url" +fi + +# DELETE the document +curl -f -v -k -E "$cert_pem_file":"$cert_password" -X DELETE -o /dev/null "$final_url" diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/mapper/ResourceExistsExceptionMapper.java b/src/main/java/com/atomgraph/linkeddatahub/server/mapper/ResourceExistsExceptionMapper.java deleted file mode 100644 index 7cfe91c52..000000000 --- a/src/main/java/com/atomgraph/linkeddatahub/server/mapper/ResourceExistsExceptionMapper.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright 2019 Martynas Jusevičius - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package com.atomgraph.linkeddatahub.server.mapper; - -import com.atomgraph.core.MediaTypes; -import com.atomgraph.linkeddatahub.server.exception.ResourceExistsException; -import com.atomgraph.server.mapper.ExceptionMapperBase; -import jakarta.inject.Inject; -import jakarta.ws.rs.core.HttpHeaders; -import org.apache.jena.rdf.model.ResourceFactory; - -import jakarta.ws.rs.core.Response; -import jakarta.ws.rs.ext.ExceptionMapper; -import org.apache.jena.rdf.model.Resource; - -/** - * JAX-RS mapper for resource conflict exceptions. - * - * @author Martynas Jusevičius {@literal } - */ -@Deprecated -public class ResourceExistsExceptionMapper extends ExceptionMapperBase implements ExceptionMapper -{ - - /** - * Constructs mapper from media types. - * - * @param mediaTypes registry of readable/writeable media types - */ - @Inject - public ResourceExistsExceptionMapper(MediaTypes mediaTypes) - { - super(mediaTypes); - } - - @Override - public Response toResponse(ResourceExistsException ex) - { - Resource exception = toResource(ex, Response.Status.CONFLICT, - ResourceFactory.createResource("http://www.w3.org/2011/http-statusCodes#Conflict")); - ex.getModel().add(exception.getModel()); - - return getResponseBuilder(ex.getModel()). - status(Response.Status.CONFLICT). - header(HttpHeaders.LOCATION, ex.getURI()). 
- build(); - } - -} From 18071ac35dd1dc04da3437b5eec89faa67e51ac4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Sun, 15 Feb 2026 12:15:58 +0100 Subject: [PATCH 11/13] Refactored CSV/RDF import scripts --- bin/add-file.sh | 12 +-- bin/imports/import-csv.sh | 71 +++------------ bin/imports/import-rdf.sh | 89 +++++-------------- .../ontology-import-upload-no-deadlock.sh | 2 +- http-tests/imports/GET-file-range.sh | 2 +- http-tests/imports/GET-file-sha1sum.sh | 2 +- .../imports/PUT-file-format-explicit.sh | 4 +- http-tests/imports/PUT-file-format.sh | 2 +- http-tests/imports/create-file.sh | 2 +- http-tests/imports/import-csv.sh | 14 ++- http-tests/imports/import-rdf-no-query.sh | 20 +++-- http-tests/imports/import-rdf.sh | 16 +++- 12 files changed, 90 insertions(+), 146 deletions(-) diff --git a/bin/add-file.sh b/bin/add-file.sh index ecc6104b1..dce1dcb13 100755 --- a/bin/add-file.sh +++ b/bin/add-file.sh @@ -17,7 +17,7 @@ print_usage() printf " --description DESCRIPTION Description of the file (optional)\n" printf "\n" printf " --file ABS_PATH Absolute path to the file\n" - printf " --file-content-type MEDIA_TYPE Media type of the file (optional)\n" + printf " --content-type MEDIA_TYPE Media type of the file (optional)\n" } hash curl 2>/dev/null || { echo >&2 "curl not on \$PATH. Aborting."; exit 1; } @@ -63,8 +63,8 @@ do shift # past argument shift # past value ;; - --file-content-type) - file_content_type="$2" + --content-type) + content_type="$2" shift # past argument shift # past value ;; @@ -98,9 +98,9 @@ if [ -z "$file" ] ; then print_usage exit 1 fi -if [ -z "$file_content_type" ] ; then +if [ -z "$content_type" ] ; then # determine content-type if not provided - file_content_type=$(file -b --mime-type "$file") + content_type=$(file -b --mime-type "$file") fi # https://stackoverflow.com/questions/19116016/what-is-the-right-way-to-post-multipart-form-data-using-curl @@ -108,7 +108,7 @@ fi rdf_post+="-F \"rdf=\"\n" rdf_post+="-F \"sb=file\"\n" rdf_post+="-F \"pu=http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#fileName\"\n" -rdf_post+="-F \"ol=@${file};type=${file_content_type}\"\n" +rdf_post+="-F \"ol=@${file};type=${content_type}\"\n" rdf_post+="-F \"pu=http://purl.org/dc/terms/title\"\n" rdf_post+="-F \"ol=${title}\"\n" rdf_post+="-F \"pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n" diff --git a/bin/imports/import-csv.sh b/bin/imports/import-csv.sh index 5ebde7b7b..55838d7ff 100755 --- a/bin/imports/import-csv.sh +++ b/bin/imports/import-csv.sh @@ -12,7 +12,7 @@ print_usage() { printf "Transforms CSV data into RDF using a SPARQL query and imports it.\n" printf "\n" - printf "Usage: %s options\n" "$0" + printf "Usage: %s options TARGET_URI\n" "$0" printf "\n" printf "Options:\n" printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" @@ -25,12 +25,8 @@ print_usage() printf " --slug STRING String that will be used as URI path segment (optional)\n" printf "\n" printf " --query-file ABS_PATH Absolute path to the text file with the SPARQL query string\n" - printf " --query-doc-slug STRING String that will be used as the query's URI path segment (optional)\n" - printf " --file ABS_PATH Absolute path to the CSV file\n" - printf " --file-slug STRING String that will be used as the file's URI path segment (optional)\n" - printf " --file-doc-slug STRING String that will be used as the file document's URI path segment (optional)\n" + printf " --csv-file ABS_PATH Absolute path to the CSV file\n" printf " 
--delimiter CHAR CSV delimiter char (default: ',')\n" - printf " --import-slug STRING String that will be used as the import's URI path segment (optional)\n" } args=() @@ -69,23 +65,8 @@ do shift # past argument shift # past value ;; - --query-doc-slug) - query_doc_slug="$2" - shift # past argument - shift # past value - ;; - --file) - file="$2" - shift # past argument - shift # past value - ;; - --file-slug) - file_slug="$2" - shift # past argument - shift # past value - ;; - --file-doc-slug) - file_doc_slug="$2" + --csv-file) + csv_file="$2" shift # past argument shift # past value ;; @@ -94,11 +75,6 @@ do shift # past argument shift # past value ;; - --import-slug) - import_slug="$2" - shift # past argument - shift # past value - ;; *) # unknown arguments args+=("$1") # save it in an array for later shift # past argument @@ -107,6 +83,8 @@ do done set -- "${args[@]}" # restore args +target="$1" + if [ -z "$cert_pem_file" ] ; then print_usage exit 1 @@ -127,7 +105,7 @@ if [ -z "$query_file" ] ; then print_usage exit 1 fi -if [ -z "$file" ] ; then +if [ -z "$csv_file" ] ; then print_usage exit 1 fi @@ -142,27 +120,6 @@ fi # Generate query ID for fragment identifier query_id=$(uuidgen | tr '[:upper:]' '[:lower:]') -# Create the imports/ container first (ignore error if it already exists) -create-container.sh \ - -b "$base" \ - -f "$cert_pem_file" \ - -p "$cert_password" \ - --proxy "$proxy" \ - --title "Imports" \ - --parent "$base" \ - --slug "imports" 2>/dev/null || true - -# Create the import item document -import_doc=$(create-item.sh \ - -b "$base" \ - -f "$cert_pem_file" \ - -p "$cert_password" \ - --proxy "$proxy" \ - --title "$title" \ - --container "${base}imports/" \ - --slug "$query_doc_slug" -) - # Add the CONSTRUCT query to the item using fragment identifier # TO-DO: fix ambigous add-construct.sh script names "$(dirname "$0")/../add-construct.sh" \ @@ -173,10 +130,10 @@ import_doc=$(create-item.sh \ --title "$title" \ --uri "#${query_id}" \ --query-file "$query_file" \ - "$import_doc" + "$target" # The query URI is the document with fragment -query="${import_doc}#${query_id}" +query="${target}#${query_id}" # Add the file to the import item add-file.sh \ @@ -185,12 +142,12 @@ add-file.sh \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --file "$file" \ - --file-content-type "text/csv" \ - "$import_doc" + --file "$csv_file" \ + --content-type "text/csv" \ + "$target" # Calculate file URI from SHA1 hash -sha1sum=$(shasum -a 1 "$file" | awk '{print $1}') +sha1sum=$(shasum -a 1 "$csv_file" | awk '{print $1}') file_uri="${base}uploads/${sha1sum}" # Generate import ID for fragment identifier @@ -207,5 +164,5 @@ add-csv-import.sh \ --query "$query" \ --file "$file_uri" \ --delimiter "$delimiter" \ - "$import_doc" + "$target" \ No newline at end of file diff --git a/bin/imports/import-rdf.sh b/bin/imports/import-rdf.sh index cdf398ac5..d0ce8dae0 100755 --- a/bin/imports/import-rdf.sh +++ b/bin/imports/import-rdf.sh @@ -10,9 +10,9 @@ function onexit() { print_usage() { - printf "Transforms CSV data into RDF using a SPARQL query and imports it.\n" + printf "Transforms RDF data using a SPARQL query and imports it.\n" printf "\n" - printf "Usage: %s options\n" "$0" + printf "Usage: %s options TARGET_URI\n" "$0" printf "\n" printf "Options:\n" printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" @@ -25,13 +25,9 @@ print_usage() printf " --slug STRING String that will be used as URI path segment (optional)\n" printf "\n" printf " 
--query-file ABS_PATH Absolute path to the text file with the SPARQL query string (optional)\n" - printf " --query-doc-slug STRING String that will be used as the query's URI path segment (optional)\n" printf " --graph GRAPH_URI URI of the graph (optional)\n" - printf " --file ABS_PATH Absolute path to the CSV file (optional)\n" - printf " --file-slug STRING String that will be used as the file's URI path segment (optional)\n" - printf " --file-doc-slug STRING String that will be used as the file document's URI path segment (optional)\n" - printf " --file-content-type MEDIA_TYPE Media type of the file\n" - printf " --import-slug STRING String that will be used as the import's URI path segment (optional)\n" + printf " --rdf-file ABS_PATH Absolute path to the RDF file (optional)\n" + printf " --content-type MEDIA_TYPE Media type of the file\n" } args=() @@ -75,33 +71,13 @@ do shift # past argument shift # past value ;; - --query-doc-slug) - query_doc_slug="$2" + --rdf-file) + rdf_file="$2" shift # past argument shift # past value ;; - --file) - file="$2" - shift # past argument - shift # past value - ;; - --file-slug) - file_slug="$2" - shift # past argument - shift # past value - ;; - --file-doc-slug) - file_doc_slug="$2" - shift # past argument - shift # past value - ;; - --file-content-type) - file_content_type="$2" - shift # past argument - shift # past value - ;; - --import-slug) - import_slug="$2" + --content-type) + content_type="$2" shift # past argument shift # past value ;; @@ -113,6 +89,8 @@ do done set -- "${args[@]}" # restore args +target="$1" + if [ -z "$cert_pem_file" ] ; then print_usage exit 1 @@ -129,11 +107,11 @@ if [ -z "$title" ] ; then print_usage exit 1 fi -if [ -z "$file" ] ; then +if [ -z "$rdf_file" ] ; then print_usage exit 1 fi -if [ -z "$file_content_type" ] ; then +if [ -z "$content_type" ] ; then print_usage exit 1 fi @@ -142,27 +120,6 @@ if [ -z "$proxy" ] ; then proxy="$base" fi -# Create the imports/ container first -create-container.sh \ - -b "$base" \ - -f "$cert_pem_file" \ - -p "$cert_password" \ - --proxy "$proxy" \ - --title "Imports" \ - --parent "$base" \ - --slug "imports" - -# Create the import item document -import_doc=$(create-item.sh \ - -b "$base" \ - -f "$cert_pem_file" \ - -p "$cert_password" \ - --proxy "$proxy" \ - --title "$title" \ - --container "${base}imports/" \ - --slug "$query_doc_slug" -) - if [ -n "$query_file" ] ; then # Generate query ID for fragment identifier query_id=$(uuidgen | tr '[:upper:]' '[:lower:]') @@ -177,10 +134,10 @@ if [ -n "$query_file" ] ; then --title "$title" \ --uri "#${query_id}" \ --query-file "$query_file" \ - "$import_doc" + "$target" # The query URI is the document with fragment - query="${import_doc}#${query_id}" + query="${target}#${query_id}" fi # Add the file to the import item @@ -190,13 +147,13 @@ add-file.sh \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --file "$file" \ - --file-content-type "$file_content_type" \ - "$import_doc" + --file "$rdf_file" \ + --content-type "$content_type" \ + "$target" # Calculate file URI from SHA1 hash -sha1sum=$(shasum -a 1 "$file" | awk '{print $1}') -file_uri="${base}uploads/${sha1sum}" +sha1sum=$(shasum -a 1 "$rdf_file" | awk '{print $1}') +rdf_file_uri="${base}uploads/${sha1sum}" # Generate import ID for fragment identifier import_id=$(uuidgen | tr '[:upper:]' '[:lower:]') @@ -211,8 +168,8 @@ if [ -n "$query" ] ; then --title "$title" \ --uri "#${import_id}" \ --query "$query" \ - --file "$file_uri" \ - "$import_doc" + --file "$rdf_file_uri" \ 
+ "$target" else add-rdf-import.sh \ -b "$base" \ @@ -222,6 +179,6 @@ else --title "$title" \ --uri "#${import_id}" \ --graph "$graph" \ - --file "$file_uri" \ - "$import_doc" + --file "$rdf_file_uri" \ + "$target" fi \ No newline at end of file diff --git a/http-tests/admin/model/ontology-import-upload-no-deadlock.sh b/http-tests/admin/model/ontology-import-upload-no-deadlock.sh index 7079f86d5..935facd7e 100755 --- a/http-tests/admin/model/ontology-import-upload-no-deadlock.sh +++ b/http-tests/admin/model/ontology-import-upload-no-deadlock.sh @@ -46,7 +46,7 @@ add-file.sh \ -b "$END_USER_BASE_URL" \ --title "Test ontology for upload import" \ --file "$pwd/test-ontology-import.ttl" \ - --file-content-type "${file_content_type}" \ + --content-type "${file_content_type}" \ "$file_doc" # Step 2: Extract the uploaded file URI (content-addressed) diff --git a/http-tests/imports/GET-file-range.sh b/http-tests/imports/GET-file-range.sh index e7eceb0a8..c9c416308 100755 --- a/http-tests/imports/GET-file-range.sh +++ b/http-tests/imports/GET-file-range.sh @@ -40,7 +40,7 @@ add-file.sh \ -b "$END_USER_BASE_URL" \ --title "Random file" \ --file "$filename" \ - --file-content-type "${file_content_type}" \ + --content-type "${file_content_type}" \ "$file_doc" # Calculate file URI from SHA1 hash diff --git a/http-tests/imports/GET-file-sha1sum.sh b/http-tests/imports/GET-file-sha1sum.sh index 08a0bd3fb..5b62d6bbc 100755 --- a/http-tests/imports/GET-file-sha1sum.sh +++ b/http-tests/imports/GET-file-sha1sum.sh @@ -47,7 +47,7 @@ add-file.sh \ -b "$END_USER_BASE_URL" \ --title "Random file" \ --file "$filename" \ - --file-content-type "${file_content_type}" \ + --content-type "${file_content_type}" \ "$file_doc" # Calculate file URI from SHA1 hash diff --git a/http-tests/imports/PUT-file-format-explicit.sh b/http-tests/imports/PUT-file-format-explicit.sh index 3d5b3d38c..d480fcb4a 100755 --- a/http-tests/imports/PUT-file-format-explicit.sh +++ b/http-tests/imports/PUT-file-format-explicit.sh @@ -42,7 +42,7 @@ add-file.sh \ -b "$END_USER_BASE_URL" \ --title "Test File for Media Type Update" \ --file "$test_file" \ - --file-content-type "text/plain" \ + --content-type "text/plain" \ "$file_doc" # Calculate file URI from SHA1 hash @@ -78,7 +78,7 @@ add-file.sh \ -b "$END_USER_BASE_URL" \ --title "Test File for Media Type Update" \ --file "$test_file" \ - --file-content-type "text/csv" \ + --content-type "text/csv" \ "$file_doc" # get updated document diff --git a/http-tests/imports/PUT-file-format.sh b/http-tests/imports/PUT-file-format.sh index fa503fbac..f066be396 100755 --- a/http-tests/imports/PUT-file-format.sh +++ b/http-tests/imports/PUT-file-format.sh @@ -71,7 +71,7 @@ add-file.sh \ -b "$END_USER_BASE_URL" \ --title "Test File for Browser Media Type" \ --file "$test_file" \ - --file-content-type "text/csv" \ + --content-type "text/csv" \ "$file_doc" \ > /dev/null diff --git a/http-tests/imports/create-file.sh b/http-tests/imports/create-file.sh index a054bb1af..e5d5c5541 100755 --- a/http-tests/imports/create-file.sh +++ b/http-tests/imports/create-file.sh @@ -38,7 +38,7 @@ add-file.sh \ -b "$END_USER_BASE_URL" \ --title "Test CSV" \ --file "$pwd/test.csv" \ - --file-content-type "${file_content_type}" \ + --content-type "${file_content_type}" \ "$file_doc" # Calculate file URI from SHA1 hash diff --git a/http-tests/imports/import-csv.sh b/http-tests/imports/import-csv.sh index 89d8458b9..85835aaaa 100755 --- a/http-tests/imports/import-csv.sh +++ b/http-tests/imports/import-csv.sh @@ -17,7 +17,16 @@ 
add-agent-to-group.sh \ --agent "$AGENT_URI" \ "${ADMIN_BASE_URL}acl/groups/writers/" -# create container +# create import item + +item=$(create-item.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "RDF import" \ + --container "$END_USER_BASE_URL") + +# create target container container=$(create-container.sh \ -f "$AGENT_CERT_FILE" \ @@ -35,7 +44,8 @@ import-csv.sh \ -b "$END_USER_BASE_URL" \ --title "Test" \ --query-file "$pwd/csv-test.rq" \ - --file "$pwd/test.csv" + --csv-file "$pwd/test.csv" \ + "$item" csv_id="test-item" csv_value="42" diff --git a/http-tests/imports/import-rdf-no-query.sh b/http-tests/imports/import-rdf-no-query.sh index d33158689..1b63a5bd1 100755 --- a/http-tests/imports/import-rdf-no-query.sh +++ b/http-tests/imports/import-rdf-no-query.sh @@ -17,9 +17,18 @@ add-agent-to-group.sh \ --agent "$AGENT_URI" \ "${ADMIN_BASE_URL}acl/groups/writers/" -# create item +# create import item item=$(create-item.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "RDF import" \ + --container "$END_USER_BASE_URL") + +# create target item + +graph=$(create-item.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ -b "$END_USER_BASE_URL" \ @@ -34,9 +43,10 @@ import-rdf.sh \ -p "$AGENT_CERT_PWD" \ -b "$END_USER_BASE_URL" \ --title "Test" \ - --file "$pwd/test.ttl" \ - --file-content-type "text/turtle" \ - --graph "$item" + --rdf-file "$pwd/test.ttl" \ + --content-type "text/turtle" \ + --graph "$graph" \ + "$item" # wait until the imported data appears (since import is executed asynchronously) @@ -51,7 +61,7 @@ do test_triples=$(curl -G -k -f -s -N \ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ -H "Accept: application/n-triples" \ - "$item" \ + "$graph" \ | grep " " || [[ $? 
== 1 ]]) sleep 1 ; diff --git a/http-tests/imports/import-rdf.sh b/http-tests/imports/import-rdf.sh index 2e4e75acd..20ed50376 100755 --- a/http-tests/imports/import-rdf.sh +++ b/http-tests/imports/import-rdf.sh @@ -17,7 +17,16 @@ add-agent-to-group.sh \ --agent "$AGENT_URI" \ "${ADMIN_BASE_URL}acl/groups/writers/" -# create container +# create import item + +item=$(create-item.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "RDF import" \ + --container "$END_USER_BASE_URL") + +# create target container container=$(create-container.sh \ -f "$AGENT_CERT_FILE" \ @@ -35,8 +44,9 @@ import-rdf.sh \ -b "$END_USER_BASE_URL" \ --title "Test" \ --query-file "$pwd/rdf-test.rq" \ - --file "$pwd/test.ttl" \ - --file-content-type "text/turtle" + --rdf-file "$pwd/test.ttl" \ + --content-type "text/turtle" \ + "$item" rdf_id="concept7367" rdf_value="http://vocabularies.unesco.org/thesaurus/concept7367" From 92a74762d92efd927dcb313ea2848fffd0374cd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Sun, 15 Feb 2026 17:16:08 +0100 Subject: [PATCH 12/13] Left sidebar CSS fixes --- .../static/com/atomgraph/linkeddatahub/css/bootstrap.css | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/css/bootstrap.css b/src/main/webapp/static/com/atomgraph/linkeddatahub/css/bootstrap.css index fcfc0fb23..03abdc375 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/css/bootstrap.css +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/css/bootstrap.css @@ -91,8 +91,7 @@ li button.btn-edit-constructors, li button.btn-add-data, li button.btn-add-ontol } #left-sidebar .nav-list > li > a.btn-container { padding-left: 24px; } #left-sidebar .nav-list > li > a { margin-left: 0; margin-right: 0; } -#left-sidebar .nav-list > li > a.btn-container -#left-sidebar li { max-height: 20em; overflow: auto; } +#left-sidebar ul { max-height: 22em; overflow: auto; } #left-sidebar li > a { display: inline-block; } #left-sidebar .btn-latest { background-image: url('../icons/ic_new_releases_black_24px.svg'); background-color: inherit; } #left-sidebar .btn-geo { background-image: url('../icons/ic_location_on_black_24px.svg'); background-color: inherit; } From 3fd852a1e89524ce46ad9a4145520a44c391db03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Sun, 15 Feb 2026 21:14:48 +0100 Subject: [PATCH 13/13] `LIMIT` on sidebar queries --- .../xsl/bootstrap/2.3.2/client/navigation.xsl | 76 +++++++++---------- 1 file changed, 37 insertions(+), 39 deletions(-) diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/navigation.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/navigation.xsl index d1d0197d5..944382032 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/navigation.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/navigation.xsl @@ -94,14 +94,13 @@ ORDER BY DESC(?created) - +
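
    A minimal usage sketch of the refactored import flow introduced in the patches above, assuming the bin/ scripts are on $PATH as in the HTTP tests; the base URL, certificate path, password, data file and graph URI below are illustrative placeholders, not values taken from the patch series.

    # placeholders -- adjust to your instance and WebID certificate
    base="https://localhost:4443/"          # assumed instance base URL
    cert="./ssl/agent/cert.pem"             # assumed WebID certificate
    cert_pwd="changeit"                     # assumed certificate password

    # create an item document that will serve as the import target,
    # mirroring the "create import item" step in the updated tests
    import_doc=$(create-item.sh \
        -f "$cert" \
        -p "$cert_pwd" \
        -b "$base" \
        --title "RDF import" \
        --container "$base")

    # run the refactored import script against that target document;
    # note the renamed options: --rdf-file and --content-type
    import-rdf.sh \
        -f "$cert" \
        -p "$cert_pwd" \
        -b "$base" \
        --title "Example import" \
        --rdf-file "/tmp/example.ttl" \
        --content-type "text/turtle" \
        --graph "${base}example/" \
        "$import_doc"

    # remove the import document afterwards with the new bin/delete.sh
    delete.sh \
        -f "$cert" \
        -p "$cert_pwd" \
        "$import_doc"
    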