From 472f3ed867b1cac3b8598552f90f368e4a73c3cb Mon Sep 17 00:00:00 2001 From: "Luke W. Johnston" Date: Mon, 31 Jul 2023 14:47:16 +0200 Subject: [PATCH 01/13] move some text out into shorts, plus reformat --- design/architecture/pseudocode.md | 524 +++++++++++++++--------------- 1 file changed, 264 insertions(+), 260 deletions(-) diff --git a/design/architecture/pseudocode.md b/design/architecture/pseudocode.md index e9f807ff..fb62bb4e 100644 --- a/design/architecture/pseudocode.md +++ b/design/architecture/pseudocode.md @@ -2,23 +2,6 @@ title: Detailed code-level architecture --- -Modern web and computational infrastructures are built on web APIs. Any -modern online resource or interface makes use of an API, such as from -Google, Gen3, or the UK Biobank. An API is a mechanism by which -different programs can communicate with one another. They form a set of -instructions or conventions that allow easy communication between a user -and the computer. APIs by their nature are transparent and if -well-documented would ensure the linked data would be FAIR, safely and -securely. - -In this case, the API would be between the user and the web server that -stores the underlying database and documentation. The API would be a -combination of a predefined set of instructions that are sent to the web -server to run certain commands as well as a set of explicit conventions -and rules on how files and folders are structured and named. Taken -together, this API would allow other software like R packages to be -built to interact with the backend to automate tasks done by the users. - ## API endpoint for data input ### Use PUT request for uploading one file (csv/txt) @@ -26,164 +9,172 @@ built to interact with the backend to automate tasks done by the users. 
Here is an example of API endpoint to upload csv/txt file to Seedcase box - # API PUT request - def post_file(request): - ### User need provide the file path and security information ### - - secured = verify_security(security_information) - if not secured: - raise PermissionDenied('Failed access control check') - +``` +# API PUT request +def post_file(request): + ### User need provide the file path and security information ### + + secured = verify_security(security_information) + if not secured: + raise PermissionDenied('Failed access control check') + + try: + logger.info('Get PUT request to upload file') + file_contents = read_file(file_path) + except: + logger.exception('Failed to read the uploaded file: ') + return Response( + 'Failed to read the uploaded file', + status=status.HTTP_400_BAD_REQUEST + ) + + # determine the selected project + if 'project_id' in request.data: + project_id = request.data['project_id'] try: - logger.info('Get PUT request to upload file') - file_contents = read_file(file_path) - except: - logger.exception('Failed to read the uploaded file: ') + model.project = project.objects.get(id=project_id) + except project.DoesNotExist: return Response( - 'Failed to read the uploaded file', - status=status.HTTP_400_BAD_REQUEST - ) - - # determine the selected project - if 'project_id' in request.data: - project_id = request.data['project_id'] - try: - model.project = project.objects.get(id=project_id) - except project.DoesNotExist: - return Response( - f'project with id {project_id} does not exist', - status=status.HTTP_400_BAD_REQUEST - ) - else: - logger.exception('Failed to provide project id') - return Response( - 'Failed to provide project id', - status=status.HTTP_400_BAD_REQUEST - ) - - - # write data to the project - try: - success_write_data = write_data_to_project(project_id, file_content) - except Exception as e: - logger.exception('Failed write data to the project') - return Response( - f'Failed to write data to the projec with error 
{e}', + f'project with id {project_id} does not exist', status=status.HTTP_400_BAD_REQUEST ) + else: + logger.exception('Failed to provide project id') + return Response( + 'Failed to provide project id', + status=status.HTTP_400_BAD_REQUEST + ) - return Response(status=status.HTTP_201_CREATED) + + # write data to the project + try: + success_write_data = write_data_to_project(project_id, file_content) + except Exception as e: + logger.exception('Failed write data to the project') + return Response( + f'Failed to write data to the projec with error {e}', + status=status.HTTP_400_BAD_REQUEST + ) + + return Response(status=status.HTTP_201_CREATED) +``` Here is an example making API call to post file to backend - import requests +``` +import requests - # Set the URL of the API endpoint - url = "https://seedcase.com/api/raw_data_file_upload" +# Set the URL of the API endpoint +url = "https://seedcase.com/api/raw_data_file_upload" - # Set the path to the text file you want to post - file_path = "path/to/text/file.txt" +# Set the path to the text file you want to post +file_path = "path/to/text/file.txt" - # Set the security code for the API call - security_code = "security_code_here" +# Set the security code for the API call +security_code = "security_code_here" - # Open the text file and read its contents - with open(file_path, 'rb') as file: - file_contents = file.read() +# Open the text file and read its contents +with open(file_path, 'rb') as file: + file_contents = file.read() - # Set the request headers, including the security code - headers = {"Authorization": "Bearer " + security_code} +# Set the request headers, including the security code +headers = {"Authorization": "Bearer " + security_code} - # Set the request parameters to include the project ID - params = {"project_id": project_id} +# Set the request parameters to include the project ID +params = {"project_id": project_id} - # Make the API call with the requests library - response = requests.put(url, 
data=file_contents, headers=headers, params=params) +# Make the API call with the requests library +response = requests.put(url, data=file_contents, headers=headers, params=params) - # Check the status code of the response to ensure the request was successful - assert response.status_code == 200, 'Fail to post file!' +# Check the status code of the response to ensure the request was successful +assert response.status_code == 200, 'Fail to post file!' +``` ### Use PUT request for uploading data directly from database (remote/local) Here is an example use this API upload data directly from a database - # API PUT Request - def post_data_from_database(request): - """ - User need provide the database connection infor, requested data parameter - and target project id. - """ - - headers = {'Authorization': 'Basic ' + database.credentials} - try: - data = request.get(database.url, headers=headres, params={data_parameter}) - except: - logger.exception('Failed connect to expected database') - return Response( - f'Failed connect to expected database with error: {error}', - status=status.HTTP_400_BAD_REQUEST - ) - - # determine the selected project - if 'project_id' in request.data: - project_id = request.data['project_id'] - try: - model.project = project.objects.get(id=project_id) - except project.DoesNotExist: - return Response( - f'project with id {project_id} does not exist', - status=status.HTTP_400_BAD_REQUEST - ) - else: - logger.exception('Failed to provide project id') - return Response( - 'Failed to provide project id', - status=status.HTTP_400_BAD_REQUEST - ) - - # write data to the project +``` +# API PUT Request +def post_data_from_database(request): + """ + User need provide the database connection infor, requested data parameter + and target project id. 
+ """ + + headers = {'Authorization': 'Basic ' + database.credentials} + try: + data = request.get(database.url, headers=headres, params={data_parameter}) + except: + logger.exception('Failed connect to expected database') + return Response( + f'Failed connect to expected database with error: {error}', + status=status.HTTP_400_BAD_REQUEST + ) + + # determine the selected project + if 'project_id' in request.data: + project_id = request.data['project_id'] try: - success_write_data = write_data_to_project(project_id, file_content) - except Exception as e: - logger.exception('Failed write data to the project') + model.project = project.objects.get(id=project_id) + except project.DoesNotExist: return Response( - f'Failed to write data to the projec with error {e}', + f'project with id {project_id} does not exist', status=status.HTTP_400_BAD_REQUEST ) + else: + logger.exception('Failed to provide project id') + return Response( + 'Failed to provide project id', + status=status.HTTP_400_BAD_REQUEST + ) - return Response(status=status.HTTP_201_CREATED) + # write data to the project + try: + success_write_data = write_data_to_project(project_id, file_content) + except Exception as e: + logger.exception('Failed write data to the project') + return Response( + f'Failed to write data to the projec with error {e}', + status=status.HTTP_400_BAD_REQUEST + ) + + return Response(status=status.HTTP_201_CREATED) +``` Here is an example making API directly post data to the database - # Set the URL of the API endpoint to retrieve raw data from - get_url = "https://raw_data_database.com/api/get_raw_data" +``` +# Set the URL of the API endpoint to retrieve raw data from +get_url = "https://raw_data_database.com/api/get_raw_data" - # Set the parameters for the GET call - get_params = {"auth_token": "your_auth_token_here", "filter_by": "some_criteria"} +# Set the parameters for the GET call +get_params = {"auth_token": "your_auth_token_here", "filter_by": "some_criteria"} - # Make the GET 
call to retrieve the raw data - response = requests.get(get_url, params=get_params) +# Make the GET call to retrieve the raw data +response = requests.get(get_url, params=get_params) - # Check the status code of the response to ensure the request was successful - if response.status_code == 200: - raw_data = response.json() - else: - self.fail('Fail to get the data!') +# Check the status code of the response to ensure the request was successful +if response.status_code == 200: + raw_data = response.json() +else: + self.fail('Fail to get the data!') - # Set the API endpoint URL to post the raw data to, including the project ID as a query parameter - post_url = "https://seedcase.com/api/post_raw_data" - post_params = {"project_id": project_id} +# Set the API endpoint URL to post the raw data to, including the project ID as a query parameter +post_url = "https://seedcase.com/api/post_raw_data" +post_params = {"project_id": project_id} - # Set the security code for the API call - security_code = "security_code_here" +# Set the security code for the API call +security_code = "security_code_here" - # Set the headers for the API call, including the security code - headers = {"Authorization": f"Bearer {security_code}", "Content-Type": "application/json"} +# Set the headers for the API call, including the security code +headers = {"Authorization": f"Bearer {security_code}", "Content-Type": "application/json"} - # Make the POST call to post the raw data to the database - response = requests.post(post_url, json=raw_data, headers=headers, params=post_params) +# Make the POST call to post the raw data to the database +response = requests.post(post_url, json=raw_data, headers=headers, params=post_params) - assert response.status_code == 200, 'Fail to post raw data to database!' +assert response.status_code == 200, 'Fail to post raw data to database!' 
+``` ## API endpoint for data output @@ -191,165 +182,178 @@ Here is an example making API directly post data to the database Here is an example use API endpoint to download data as json/csv format - # API GET request - def download_data_json(request): - """ - User need provide project id, and data parameters and security info - """ - secured = verify_security(security_information) - if not secured: - raise PermissionDenied('Failed access control check') - - try: - stream = export_data(project_id, data_parameters) - # could add one step funciton for convert json into csv or other file - response = HttpResponse(stream.getvalue(), content_type='application/json') - response['Content-Disposition'] = 'attachment; filename="output_data.json"' - return response - except Exception as e: - logger.exception('fail to download json data') - return HttpResponse( - f'Failed in exporting to data with error {e}', - status=status.HTTP_400_BAD_REQUEST - ) +``` +# API GET request +def download_data_json(request): + """ + User need provide project id, and data parameters and security info + """ + secured = verify_security(security_information) + if not secured: + raise PermissionDenied('Failed access control check') + + try: + stream = export_data(project_id, data_parameters) + # could add one step funciton for convert json into csv or other file + response = HttpResponse(stream.getvalue(), content_type='application/json') + response['Content-Disposition'] = 'attachment; filename="output_data.json"' + return response + except Exception as e: + logger.exception('fail to download json data') + return HttpResponse( + f'Failed in exporting to data with error {e}', + status=status.HTTP_400_BAD_REQUEST + ) +``` Here is the example how to use the GET call download file - download_url = "https://ssedcase.com/api/download_file" +``` +download_url = "https://ssedcase.com/api/download_file" - # Set any required authentication or filtering parameters as query parameters - auth_token = 
"your_auth_token_here" - filter_param = "some_criteria" - params = {"auth_token": auth_token, "filter_by": filter_param} +# Set any required authentication or filtering parameters as query parameters +auth_token = "your_auth_token_here" +filter_param = "some_criteria" +params = {"auth_token": auth_token, "filter_by": filter_param} - # Set the headers for the API call, including any required security tokens - headers = {"Authorization": "Bearer your_security_token_here"} +# Set the headers for the API call, including any required security tokens +headers = {"Authorization": "Bearer your_security_token_here"} - # Make the GET call to download the file - response = requests.get(download_url, params=params, headers=headers) +# Make the GET call to download the file +response = requests.get(download_url, params=params, headers=headers) +``` ### Use POST request to generate data file to a location for download later Here is an example use API to get the data and post to location for user to download later - # API POST request - - def fetch_create_data(request): - """ - User need to provide project id, data parameters and security information. - It will be post call to request data generation, and check status. Notify - user when the status is true, when data is ready for download at the defined - location - """ - secured = verify_security(security_information) - if not secured: - raise PermissionDenied('Failed access control check') - - try: - generated_data = data_generation(project_id, data_parameters) - - return Response(status=status.HTTP_201_CREATED) - - except Exception as e: - logger.exception('fail to generate data') - return HttpResponse( - f'Failed in generating to data with error {e}', - status=status.HTTP_400_BAD_REQUEST - ) +``` +# API POST request + +def fetch_create_data(request): + """ + User need to provide project id, data parameters and security information. + It will be post call to request data generation, and check status. 
Notify + user when the status is true, when data is ready for download at the defined + location + """ + secured = verify_security(security_information) + if not secured: + raise PermissionDenied('Failed access control check') + + try: + generated_data = data_generation(project_id, data_parameters) + + return Response(status=status.HTTP_201_CREATED) - status = False - fail = False - while status == False and fail == False: - status = check_generating_status(generated_data.process_id) - if time >= designed_fail_time - fail = True - if fail: - logger.exception('fail to generate data, it takes too long') - publish_data = ( - designed_location, - content = 'fail to generate data, it takes too long' - ) - else: - # Could add function to convert into csv/json file - publish_data = ( - designed_location, - content = generated_data - ) - # Could add function to notify user + except Exception as e: + logger.exception('fail to generate data') + return HttpResponse( + f'Failed in generating to data with error {e}', + status=status.HTTP_400_BAD_REQUEST + ) + + status = False + fail = False + while status == False and fail == False: + status = check_generating_status(generated_data.process_id) + if time >= designed_fail_time + fail = True + if fail: + logger.exception('fail to generate data, it takes too long') + publish_data = ( + designed_location, + content = 'fail to generate data, it takes too long' + ) + else: + # Could add function to convert into csv/json file + publish_data = ( + designed_location, + content = generated_data + ) + # Could add function to notify user +``` Here is an example how to use this API endpoint - # Set the URL of the API endpoint to retrieve the data from the database - data_url = "https://seedcase.com/api/get_data" +``` +# Set the URL of the API endpoint to retrieve the data from the database +data_url = "https://seedcase.com/api/get_data" - # Set any required authentication or filtering parameters as query parameters - auth_token = 
"auth_token_here" - filter_param = "some_criteria" - params = {"auth_token": auth_token, "filter_by": filter_param} +# Set any required authentication or filtering parameters as query parameters +auth_token = "auth_token_here" +filter_param = "some_criteria" +params = {"auth_token": auth_token, "filter_by": filter_param} - # Set the headers for the API call, including any required security tokens - headers = {"Authorization": "Bearer your_security_token_here"} +# Set the headers for the API call, including any required security tokens +headers = {"Authorization": "Bearer your_security_token_here"} - # Make the GET call to retrieve the data from the database - response = requests.get(data_url, params=params, headers=headers) +# Make the GET call to retrieve the data from the database +response = requests.get(data_url, params=params, headers=headers) - # Check the status code of the response to ensure the request was successful - if response.status_code == 200: - # If the request was successful, save the data to a file in a temporary directory - data = response.content - filename = "data.txt" - with open(os.path.join(app.config['UPLOAD_FOLDER'], filename), "wb") as f: - f.write(data) +# Check the status code of the response to ensure the request was successful +if response.status_code == 200: + # If the request was successful, save the data to a file in a temporary directory + data = response.content + filename = "data.txt" + with open(os.path.join(app.config['UPLOAD_FOLDER'], filename), "wb") as f: + f.write(data) - def download_file(): - # Serve the file for download when the user visits the /download route - filename = "data.txt" - return send_from_directory(app.config['UPLOAD_FOLDER'], filename, as_attachment=True) +def download_file(): + # Serve the file for download when the user visits the /download route + filename = "data.txt" + return send_from_directory(app.config['UPLOAD_FOLDER'], filename, as_attachment=True) - if __name__ == '__main__': - # Set up the 
app with a temporary directory to store the file - app.config['UPLOAD_FOLDER'] = '/tmp' - app.run(debug=True) +if __name__ == '__main__': + # Set up the app with a temporary directory to store the file + app.config['UPLOAD_FOLDER'] = '/tmp' + app.run(debug=True) +``` ## Plugin function to Clean (QC) data file Here is an example of the plugin for cleaning data - from django.core.files.storage import default_storage +``` +from django.core.files.storage import default_storage - def process_sample_analysis_data_file(file): - # Create an empty list to store the cleaned data - processed_data = [] - - # Call different clean data function - - # Loop through each line in the file - processed_data = clean_data_function(file) +def process_sample_analysis_data_file(file): + # Create an empty list to store the cleaned data + processed_data = [] + + # Call different clean data function + + # Loop through each line in the file + processed_data = clean_data_function(file) - # Return the cleaned data as a string with each line separated by a newline character - return '\n'.join(processed_data ) + # Return the cleaned data as a string with each line separated by a newline character + return '\n'.join(processed_data ) - project_data_model = MyModel.objects.get(pk=1) + project_data_model = MyModel.objects.get(pk=1) - # Get the uploaded file from the FileField - file = project_data.data_file + # Get the uploaded file from the FileField + file = project_data.data_file - # Open the file and read the data - with default_storage.open(file.name) as f: - data = f.readlines() + # Open the file and read the data + with default_storage.open(file.name) as f: + data = f.readlines() +``` ## Plugin function to sort data file Here is an example of the plugin for sorting data in to different part such as metadata - def get_metadata_from_csv(file): - # Open the file and create a CSV reader - csv_file = csv.reader(file) +``` +def get_metadata_from_csv(file): + # Open the file and create a CSV reader + 
csv_file = csv.reader(file) + + # Read the header row as metadata + metadata = next(csv_file) - # Read the header row as metadata - metadata = next(csv_file) + # Return the metadata as a list + return metadata +``` - # Return the metadata as a list - return metadata From 70b73069789feb1b0d39c5ead0b174002e8501c0 Mon Sep 17 00:00:00 2001 From: "Luke W. Johnston" Date: Mon, 31 Jul 2023 14:47:35 +0200 Subject: [PATCH 02/13] move over into building block view --- design/architecture/api.md | 141 ------------------ .../building-block-view.qmd | 140 +++++++++++++++++ 2 files changed, 140 insertions(+), 141 deletions(-) delete mode 100644 design/architecture/api.md diff --git a/design/architecture/api.md b/design/architecture/api.md deleted file mode 100644 index 0ecd2c11..00000000 --- a/design/architecture/api.md +++ /dev/null @@ -1,141 +0,0 @@ -# API Endpoints - -These are some potential endpoints. For almost all of them, there are -some overlapping features. For instance, each endpoint accepts (either -as a requirement or optionally): - -- Authorization (string): A security code that is used to authenticate - the user. This security code should be generated and provided to the - user when they create an account. - -| Response status code | Description | -|---------------------------|---------------------------------------------------------| -| 400 Bad Request | The request was malformed or invalid. | -| 401 Unauthorized | The authentication code provided is invalid or missing. | -| 500 Internal Server Error | There was an error processing the request. | - -: Common API response status codes shared by (almost all) endpoints. - -## Upload data - -- `POST /data/raw/`: This endpoint allows users to upload raw research - data to a project. - -| Response status code | Description | -|----------------------|--------------------------------------------------| -| 201 Created | The research data was successfully posted. 
| -| 404 Not Found | The project with the specified ID was not found. | - -## Upload raw data - -- `POST /data/raw/file`: This endpoint allows users to upload raw data - files to a project. - -| Response status code | Description | -|----------------------|----------------------------------------------| -| 201 Created | The raw data file was successfully uploaded. | - -## Generate data for a data project - -- `POST /projects/{project_id}/data/`: This endpoint allows users to - post data to be generated for a data project, and provides a - location for the user to download the generated data after it has - been created. - - `project_id` (string, required): The unique identifier of the - data project that the data is being generated for. - -| Response status code | Description | -|----------------------|-----------------------------------------------------------------------| -| 202 Accepted | The request was accepted and the data generation process has started. | -| 404 Not Found | The project with the specified ID was not found. | - -## Run QC function - -- `POST /data/raw/qc/{function_name}`: This endpoint allows users to - call a specific data cleaning function. The user must provide the - name of the function they wish to call as a URL parameter and any - additional parameters required by the function in the request body. - - `function_name` (string, required): The name of the data - cleaning function to call. - -| Response status code | Description | -|-----------------------|-------------------------------------------------------------------------| -| 201 process completed | The request was successful and the data cleaning function was executed. | - -## Get list of data projects - -- `GET /projects/`: This endpoint allows users to retrieve a list of - data projects based on the specified filter parameters. - - `status` (string, optional): Filter the list of projects based - on their status. Possible values are "proposed", "ongoing", and - "completed". 
If not specified, all projects will be returned. - -| Response status code | Description | -|----------------------|-----------------------------| -| 200 OK | The request was successful. | - -## Get metadata - -- `GET /metadata/`: This endpoint allows users to retrieve metadata - for all the data contained in the Data Resource. -- `GET /projects/{project_id}/metadata`: This endpoint allows users to - retrieve metadata, like project description and variables requested, - for the data project. - -| Response status code | Description | -|----------------------|-----------------------------| -| 200 OK | The request was successful. | - -## Get list of registered users - -- `GET /users/`: Get list of users registered within the Data - Resource. -- `GET /projects/{project_id}/users/`: Get list of users assigned to a - specific project. - - `project_id` (string, required): The ID of the project. - -| Response status code | Description | -|----------------------|-----------------------------| -| 200 OK | The request was successful. | - -## Assign permissions to users - -- `POST /users/{user_id}/permissions/`: This endpoint allows - (authorized) users to add, remove, or update user permissions for a - project. - - `user_id` (string, required): The ID of the user. - - Additional parameters in the request body, as a JSON object with - the following fields: - - `user_email` (string): The email address of the user whose - permissions will be updated. - - `action` (string): The action to be performed on the user's - permissions. Valid values are "add", "remove", or "update". - - `permission` (string): The permission to be granted or - revoked. Valid values are "read" or "write". If the action - is "update", the user must also provide the new value of the - permission field. 
- -| Response status code | Description | -|----------------------|------------------------------------------------------------------| -| 200 OK | The request was successful and the user permissions were updated | -| 404 Not Found | The user with the specified ID was not found. | - -## Get the changelog - -- `GET /log/`: This endpoint allows users to retrieve the history log - of the Data Resource based on certain criteria. The user must - provide at least one of the following parameters in the query - string: - - `dataset_id` (string, optional): The ID of the dataset for which - to retrieve the history log. - - `date_from` (string, optional): The starting date for which to - retrieve the history log (formatted as `YYYY-MM-DD`). - - `date_to` (string, optional): The ending date for which to - retrieve the history log (formatted as `YYYY-MM-DD`). If - multiple parameters are provided, the endpoint will return all - history logs that match any of the provided criteria. - -| Response status code | Description | -|----------------------|---------------------------------------------------------------| -| 200 OK | The request was successful and the history log was retrieved. | -| 404 Not Found | No history logs were found for the specified criteria. | diff --git a/design/software-architecture/building-block-view.qmd b/design/software-architecture/building-block-view.qmd index dd102ce1..c082277d 100644 --- a/design/software-architecture/building-block-view.qmd +++ b/design/software-architecture/building-block-view.qmd @@ -139,6 +139,146 @@ Management Layer* part. ![user data management](/design/images/user-data-management.png) +These are some potential endpoints for the API. For almost all of them, +there are some overlapping features. For instance, each endpoint accepts +(either as a requirement or optionally): + +- Authorization (string): A security code that is used to authenticate + the user. 
This security code should be generated and provided to the + user when they create an account. + +| Response status code | Description | +|---------------------------|---------------------------------------------------------| +| 400 Bad Request | The request was malformed or invalid. | +| 401 Unauthorized | The authentication code provided is invalid or missing. | +| 500 Internal Server Error | There was an error processing the request. | + +: Common API response status codes shared by (almost all) endpoints. + +#### Upload data + +- `POST /data/raw/`: This endpoint allows users to upload raw research + data to a project. + +| Response status code | Description | +|----------------------|--------------------------------------------------| +| 201 Created | The research data was successfully posted. | +| 404 Not Found | The project with the specified ID was not found. | + +#### Upload raw data + +- `POST /data/raw/file`: This endpoint allows users to upload raw data + files to a project. + +| Response status code | Description | +|----------------------|----------------------------------------------| +| 201 Created | The raw data file was successfully uploaded. | + +#### Generate data for a data project + +- `POST /projects/{project_id}/data/`: This endpoint allows users to + post data to be generated for a data project, and provides a + location for the user to download the generated data after it has + been created. + - `project_id` (string, required): The unique identifier of the + data project that the data is being generated for. + +| Response status code | Description | +|----------------------|-----------------------------------------------------------------------| +| 202 Accepted | The request was accepted and the data generation process has started. | +| 404 Not Found | The project with the specified ID was not found. | + +#### Run QC function + +- `POST /data/raw/qc/{function_name}`: This endpoint allows users to + call a specific data cleaning function. 
The user must provide the + name of the function they wish to call as a URL parameter and any + additional parameters required by the function in the request body. + - `function_name` (string, required): The name of the data + cleaning function to call. + +| Response status code | Description | +|-----------------------|-------------------------------------------------------------------------| +| 201 process completed | The request was successful and the data cleaning function was executed. | + +#### Get list of data projects + +- `GET /projects/`: This endpoint allows users to retrieve a list of + data projects based on the specified filter parameters. + - `status` (string, optional): Filter the list of projects based + on their status. Possible values are "proposed", "ongoing", and + "completed". If not specified, all projects will be returned. + +| Response status code | Description | +|----------------------|-----------------------------| +| 200 OK | The request was successful. | + +#### Get metadata + +- `GET /metadata/`: This endpoint allows users to retrieve metadata + for all the data contained in the Data Resource. +- `GET /projects/{project_id}/metadata`: This endpoint allows users to + retrieve metadata, like project description and variables requested, + for the data project. + +| Response status code | Description | +|----------------------|-----------------------------| +| 200 OK | The request was successful. | + +#### Get list of registered users + +- `GET /users/`: Get list of users registered within the Data + Resource. +- `GET /projects/{project_id}/users/`: Get list of users assigned to a + specific project. + - `project_id` (string, required): The ID of the project. + +| Response status code | Description | +|----------------------|-----------------------------| +| 200 OK | The request was successful. 
| 200 OK               | The request was successful and the user permissions were updated. |
| +| 404 Not Found | No history logs were found for the specified criteria. | + #### Data entry | Building Block | Description | From 76e577bd27da52378bb9aba36c5b823efc81f6dd Mon Sep 17 00:00:00 2001 From: "Luke W. Johnston" Date: Mon, 31 Jul 2023 14:47:43 +0200 Subject: [PATCH 03/13] don't need --- .../images/raw/diagram-data-input.mmd | 44 ------------------- 1 file changed, 44 deletions(-) delete mode 100644 design/pathways/images/raw/diagram-data-input.mmd diff --git a/design/pathways/images/raw/diagram-data-input.mmd b/design/pathways/images/raw/diagram-data-input.mmd deleted file mode 100644 index 6d180824..00000000 --- a/design/pathways/images/raw/diagram-data-input.mmd +++ /dev/null @@ -1,44 +0,0 @@ -%% https://kroki.io/ look into more -flowchart TB - %% Data flow through UI - data_batch_sample_analysis_raw[] %% Save as file to be processed with specific analysis processing? - data_batch_questionnaires_raw[] - - data_stream_questionnaires[] - - --> ui_data_input_file[] - - %% Data flow directly through API - data_routine_redirect[] - - data --> api_post_data_[] - data --> api_post_data_[] - - %% Flow from API to backend - - ui_data_input_file --> api_post_data_file - - %% Orchestration for backend management - %% See: https://airflow.apache.org/docs/apache-airflow/stable/index.html - %% - Trigger to run processing when new raw data is stored - %% - - - - %% QC: Feedback to user - qc_results --> ui_qc_report - qc_temp_tbl --> - -flowchart TB - %% Batch input of sample analysis data in its raw form - data_batch_sample_analysis_raw --> ui_upload_sample_analysis_raw --> api_post_data_raw --> path_sample_analysis_raw --> api_process_sample_analysis_data --> api_add_data_to_database --> path_database - - api_post_data_raw --> api_append_changelog --> path_data_raw_changelog - - api_process_sample_analysis_data --> api_qc_results_sample_analysis --> path_qc_results_sample_analysis --> ui_qc_report_sample_analysis - - api_qc_results_sample_analysis --> 
api_append_qc_changelog --> path_qc_changelog - - metadata_batch_sample_analysis_raw --> api_post_metadata_raw --> path_metadata_raw - - api_process_sample_analysis_data --> api_extract_metadata_sample_analysis_processed --> path_metadata_processed - From eb5094a9b4879d30c86dd08423d6e8b37c198abe Mon Sep 17 00:00:00 2001 From: "Luke W. Johnston" Date: Mon, 31 Jul 2023 14:47:53 +0200 Subject: [PATCH 04/13] don't need --- design/architecture/frontend.md | 26 -------------------------- 1 file changed, 26 deletions(-) delete mode 100644 design/architecture/frontend.md diff --git a/design/architecture/frontend.md b/design/architecture/frontend.md deleted file mode 100644 index b995c4f6..00000000 --- a/design/architecture/frontend.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "Frontend interface layer" ---- - -This interface is what all users interact with and use, with essentially -three "permission" levels available: - -1. Full access: User 4. -2. Authorized access: - 1. User 1: A data upload portal that accepts either the routine, - known data or new, unknown data from User 2. - 2. User 2: A data request portal that allows the user to select - variables in the catalogue of data as well as write and submit - project proposals for access. -3. Public access: User 3 would have access to all public pages, which - includes the data dictionary, updates on findings, list of current - and past projects, and a log of any changes or additions to the - data. - -All content would be rendered directly as plain HTML text to ease use of -existing webpage translation services (e.g. Google Translate), so that -content written in another language, i.e., Danish, would still be -readable to non-native speakers. This would also lower the amount of -maintenance necessary for documentation. - -TODO: Diagram design of basic interface? From 40c99decaa01d8f6aba5985e550d5260f74f4216 Mon Sep 17 00:00:00 2001 From: "Luke W. 
Johnston" Date: Mon, 31 Jul 2023 14:48:04 +0200 Subject: [PATCH 05/13] moved and rearranged over into building block view --- design/architecture/backend.md | 103 ------------------ .../building-block-view.qmd | 18 +++ 2 files changed, 18 insertions(+), 103 deletions(-) delete mode 100644 design/architecture/backend.md diff --git a/design/architecture/backend.md b/design/architecture/backend.md deleted file mode 100644 index 77b74c0c..00000000 --- a/design/architecture/backend.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: "The database and documentation layer" ---- - -Given the heterogeneity in the sources of data input, the backend will -need to be composed of multiple components: raw data files as plain -text, cleaning and processing programming scripts, a formal database -structure (e.g. SQL), a VC system to track changes to the raw data and -processing scripts, a data version numbering system, a changelog -describing the changes, and a data dictionary linked to the variables -contained in the database. Versioning of the raw data and scripts is -done for recordkeeping, auditing, and transparency, in addition to -allowing comparison of data used between past and current projects that -use the data. - -A major challenge to building the backend is in the heterogeneity of the -data input. The key is to establish and enforce a standardized Common -Data Model (CDM) for all incoming data at the point of entry. For the -framework, the exact contents of the database aren't important, since as -long as the contents follow the CDM it will be programmatically merged -into the final formal database. This is necessary as the database -contents depend heavily on the research topic and aims of the study that -will use this framework. - -The backend documentation is either largely generated automatically or -manually written. 
For instance, the list of projects and findings would -be generated by the submitted projects and input from User 2 -(researchers) while the changelog would be updated either by automated -additions or, optionally, manually from User 4. The data dictionary -would be stored as a JSON file with the documentation text itself as -Markdown text. This data dictionary would be publicly accessible and -could be updated by anyone (with approval from User 4), potentially -through "Merge Request" mechanisms. This mechanism involves -automatically linking any addition or correction back to the main -documentation and requesting it be merged into it. - -TODO: File tree structure design showing where data will be saved and -how it will look like on the computer. - -# Backend technology - -An open source database is the backbone of our project. It will support -the access, security features, and data storage. - -Below we have first detailed the specifications that we are looking for -in a backend database, and then a discussion of a selection of specific -systems that may meet our requirements. - -# Database specifications - -This document will change and grow as we develop Seedcase database. This -section lists some specifications we need and that we already know it -must be able to handle. - -## Must have functionality - -There are some standard features of a database which should be present -no matter which system we end up using. A database backend should be -able to handle the data stored, the users, security, and be able to -monitor and back up data. - -### Data handling - -There are some important aspects of data handling that we will need the -database to do. It will need to be able to accept data written into the -system, be able to store the data, and able to output the data when -needed. 
It also needs to ensure that data integrity is preserved and -that concurrency issues doesn't arise, in other words it needs to comply -with the ACID properties (Atomicity, Consistency, Isolation, and -Durability). - -We also need a system that can handle potentially large amounts of data, -not only on many individuals (row based), but also many variables for -each individual record (column based). The speed with which it is able -to retrieve data will also play a role in our choice of backend. - -### User handling - -We need a system that is capable of handling users. We will need a -system that can take the access policies we design and implement them, -likely through a system of roles and groups which the individual user -can be assigned to. We are currently working with a model where we -expect most instances of Seedcase to be used by a single user, but it -will need to be able to scale to allow for multiple users with very -different roles. - -### Security - -The security of Seedcase is discussed in [a different document](link). -We will need to be able to comply with different regulations (like -GDPR), it is therefore likely that it will be a combination of backend -and frontend security working together to keep data safe. - -### Monitoring - -The backend of Seedcase should contain measures that will ensure that if -data is changed or lost it is possible to restore it. There are a couple -of ways to achieve this, either monitoring/logging of changes to the -data, or by running backups of all or parts of the data on a regular -basis. These two approaches can of course work in tandem, where -important data tables and structures have changes logged in audit -tables, combined with frequent backups saved to a separate part of the -system. 
generated automatically, though it can be modified manually by the users
Johnston" Date: Mon, 31 Jul 2023 14:49:50 +0200 Subject: [PATCH 07/13] moved into images folder, but not sure we need these anymore --- design/{pathways => }/images/data-input-batch.mmd | 0 design/{pathways => }/images/data-input-sample-analysis.mmd | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename design/{pathways => }/images/data-input-batch.mmd (100%) rename design/{pathways => }/images/data-input-sample-analysis.mmd (100%) diff --git a/design/pathways/images/data-input-batch.mmd b/design/images/data-input-batch.mmd similarity index 100% rename from design/pathways/images/data-input-batch.mmd rename to design/images/data-input-batch.mmd diff --git a/design/pathways/images/data-input-sample-analysis.mmd b/design/images/data-input-sample-analysis.mmd similarity index 100% rename from design/pathways/images/data-input-sample-analysis.mmd rename to design/images/data-input-sample-analysis.mmd From 768708188adc0da7a23e8e91e60897c0ea2964c2 Mon Sep 17 00:00:00 2001 From: "Luke W. Johnston" Date: Mon, 31 Jul 2023 17:43:09 +0200 Subject: [PATCH 08/13] moved (and modified) this text over into runtime view --- design/pathways/knowledge-sharing.md | 27 ------------------- design/software-architecture/runtime-view.qmd | 15 +++++++++++ 2 files changed, 15 insertions(+), 27 deletions(-) delete mode 100644 design/pathways/knowledge-sharing.md diff --git a/design/pathways/knowledge-sharing.md b/design/pathways/knowledge-sharing.md deleted file mode 100644 index e24180d1..00000000 --- a/design/pathways/knowledge-sharing.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "Knowledge sharing pathways" ---- - -The framework assumes that this user would interact with the portal -through at least three routes: - -1. Reading about the database, its history, organizational structure - and ownership, and any other details in a typical "About" section. - Since this is a part of a basic website layout, it is not fully part - of the framework. -2. 
Browsing completed and ongoing projects making use of the database, - along with the description of the project and variables used. This - would be based on a "database" of projects that have requested - access to the data, would be stored in JSON format, and through the - API specification would be generated into a table on the portal. -3. Getting updates on the latest results from the projects and browsing - some basic aggregate statistics of some key variables. This would - involve programmatically generating standard aggregate statistics of - the underlying database, formatting results according to the API, - transferring them to the web server, and have it generated into a - webpage format. Overlapping with the second route, key findings from - completed projects can be stored in the projects database and be - generated into a listing on the website. - -TODO: Directed graph diagram showing pathways actions take from browsing -project. diff --git a/design/software-architecture/runtime-view.qmd b/design/software-architecture/runtime-view.qmd index 8fa87558..781c063a 100644 --- a/design/software-architecture/runtime-view.qmd +++ b/design/software-architecture/runtime-view.qmd @@ -255,3 +255,18 @@ that data project as part of a new data project. The data projects listing on the Web Portal will include a button to either download the JSON metadata or to copy the project into a new data project application. + +## Browsing projects and results + +There are at least three routes we anticipate the external user might +take when browsing the Data Resource: + +1. Reading about the Data Resource, its history, organizational + structure and ownership, and any other details in a typical "About" + section. +2. Browsing the completed and ongoing projects that are using or have + used the Data Resource, including viewing some of the basic results + of the completed projects. +3. Viewing some basic aggregate statistics of some key variables in the + Data Resource. 
The aggregate statistics would be generated + programmatically. From 81414e7e34d6db9f2f8d692e33dac0f33e3c7370 Mon Sep 17 00:00:00 2001 From: "Luke W. Johnston" Date: Mon, 31 Jul 2023 17:47:11 +0200 Subject: [PATCH 09/13] moved relevant text into building block view --- design/pathways/data-input.qmd | 53 ------------------- .../building-block-view.qmd | 5 +- 2 files changed, 3 insertions(+), 55 deletions(-) delete mode 100644 design/pathways/data-input.qmd diff --git a/design/pathways/data-input.qmd b/design/pathways/data-input.qmd deleted file mode 100644 index 72315f75..00000000 --- a/design/pathways/data-input.qmd +++ /dev/null @@ -1,53 +0,0 @@ -# Data input pathways - - - -## Pre- and post-processing of data - -Any automated processing that is developed specific to a project would -need to adhere to the API's conventions. If any issues are found or if -the data is entirely new to the database, they get sent to a log and -User 4 would receive a notification to deal with the issue. - -## Data, metadata, and expected values input - -### Quality control - -Any new or updated data that is uploaded would trigger generic automated -data cleaning, processing, quality control checks of this new data. - -qc could be based on basic things like checking if file is correct, -contains proper delims, etc. - - - -### Versioning (internal vs external) - -- versioning, external via DOI with releases of summary stats and - changelog. (how often?) - -If there are no issues or the issues have been dealt with, an automated -script would take a snapshot of the data with the VC system, the version -number (based on [Semantic Versioning](https://semver.org/)) of the data -would be updated, an entry would get added to the changelog, and the -formal database would get updated. - -## From input to storage - -### Flow for "sample analysis" data - -```{mermaid} -%%| eval: true -%%| label: fig-data-input-sample-analysis -%%| fig-cap: "The flow of sample analysis-type data through Seedcase." 
-%%| file: images/data-input-sample-analysis.mmd -``` - -### Flow for batch data - -```{mermaid} -%%| eval: true -%%| label: fig-data-input-batch -%%| fig-cap: "The flow of batch collection data through Seedcase." -%%| file: images/data-input-batch.mmd -``` diff --git a/design/software-architecture/building-block-view.qmd b/design/software-architecture/building-block-view.qmd index a45ff68a..e9d124d1 100644 --- a/design/software-architecture/building-block-view.qmd +++ b/design/software-architecture/building-block-view.qmd @@ -307,8 +307,9 @@ will need to be composed of multiple components: the database - A version control system to track changes to the raw data and processing scripts, for recordkeeping, auditing, and transparency -- A data version numbering system, to help track which data the user - is working with and to allow for comparison between versions +- A data version numbering system, based on the + [CalVer](https://calver.org/) style, to help track which data the + user is working with and to allow for comparison between versions - A changelog describing the changes - A data dictionary linked to the variables contained in the database, modified manually by the users but generated automatically From 4ea106ac047331ead1037919b83f10f7f004ea4e Mon Sep 17 00:00:00 2001 From: "Luke W. Johnston" Date: Mon, 31 Jul 2023 17:47:23 +0200 Subject: [PATCH 10/13] this makes more sense in community --- {design => community}/decisions/why-github/index.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {design => community}/decisions/why-github/index.md (100%) diff --git a/design/decisions/why-github/index.md b/community/decisions/why-github/index.md similarity index 100% rename from design/decisions/why-github/index.md rename to community/decisions/why-github/index.md From f0e52f268b2177c51dad8be527e4eff9e6453175 Mon Sep 17 00:00:00 2001 From: "Luke W. 
Johnston" Date: Mon, 31 Jul 2023 17:52:11 +0200 Subject: [PATCH 11/13] moved to software-architecture folder --- design/{ => software-architecture}/decisions/why-REST/index.qmd | 0 .../{ => software-architecture}/decisions/why-containers/index.md | 0 design/{ => software-architecture}/decisions/why-django/index.qmd | 0 design/{ => software-architecture}/decisions/why-docker/index.md | 0 design/{ => software-architecture}/decisions/why-license/index.md | 0 design/{ => software-architecture}/decisions/why-python/index.qmd | 0 design/{ => software-architecture}/decisions/why-sql/index.md | 0 7 files changed, 0 insertions(+), 0 deletions(-) rename design/{ => software-architecture}/decisions/why-REST/index.qmd (100%) rename design/{ => software-architecture}/decisions/why-containers/index.md (100%) rename design/{ => software-architecture}/decisions/why-django/index.qmd (100%) rename design/{ => software-architecture}/decisions/why-docker/index.md (100%) rename design/{ => software-architecture}/decisions/why-license/index.md (100%) rename design/{ => software-architecture}/decisions/why-python/index.qmd (100%) rename design/{ => software-architecture}/decisions/why-sql/index.md (100%) diff --git a/design/decisions/why-REST/index.qmd b/design/software-architecture/decisions/why-REST/index.qmd similarity index 100% rename from design/decisions/why-REST/index.qmd rename to design/software-architecture/decisions/why-REST/index.qmd diff --git a/design/decisions/why-containers/index.md b/design/software-architecture/decisions/why-containers/index.md similarity index 100% rename from design/decisions/why-containers/index.md rename to design/software-architecture/decisions/why-containers/index.md diff --git a/design/decisions/why-django/index.qmd b/design/software-architecture/decisions/why-django/index.qmd similarity index 100% rename from design/decisions/why-django/index.qmd rename to design/software-architecture/decisions/why-django/index.qmd diff --git 
a/design/decisions/why-docker/index.md b/design/software-architecture/decisions/why-docker/index.md similarity index 100% rename from design/decisions/why-docker/index.md rename to design/software-architecture/decisions/why-docker/index.md diff --git a/design/decisions/why-license/index.md b/design/software-architecture/decisions/why-license/index.md similarity index 100% rename from design/decisions/why-license/index.md rename to design/software-architecture/decisions/why-license/index.md diff --git a/design/decisions/why-python/index.qmd b/design/software-architecture/decisions/why-python/index.qmd similarity index 100% rename from design/decisions/why-python/index.qmd rename to design/software-architecture/decisions/why-python/index.qmd diff --git a/design/decisions/why-sql/index.md b/design/software-architecture/decisions/why-sql/index.md similarity index 100% rename from design/decisions/why-sql/index.md rename to design/software-architecture/decisions/why-sql/index.md From c03eaf0ca10b813135959688ff4ee1c62f4dfb38 Mon Sep 17 00:00:00 2001 From: "Luke W. Johnston" Date: Mon, 31 Jul 2023 17:52:38 +0200 Subject: [PATCH 12/13] create decision listing in relevant file and delete old one --- design/decisions.md | 13 ------------- .../architecture-decisions.qmd | 6 ++++++ 2 files changed, 6 insertions(+), 13 deletions(-) delete mode 100644 design/decisions.md diff --git a/design/decisions.md b/design/decisions.md deleted file mode 100644 index f5d571fc..00000000 --- a/design/decisions.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: "Reasons for decisions" -listing: - contents: decisions - type: table - table-hover: true - table-striped: true - categories: true ---- - -We try to document why we make the decisions we make on the tools, -overall design, and technologies that seedcase uses. 
- diff --git a/design/software-architecture/architecture-decisions.qmd b/design/software-architecture/architecture-decisions.qmd index a7fff2ae..e3692b34 100644 --- a/design/software-architecture/architecture-decisions.qmd +++ b/design/software-architecture/architecture-decisions.qmd @@ -1,6 +1,12 @@ --- title: "Architecture decisions" order: 9 +listing: + contents: decisions + type: table + table-hover: true + table-striped: true + categories: true --- {{< include /includes/_wip.qmd >}} From 8718764aac52b1579c922a9b5587b81d81416780 Mon Sep 17 00:00:00 2001 From: "Luke W. Johnston" Date: Mon, 31 Jul 2023 17:52:47 +0200 Subject: [PATCH 13/13] fix links --- _quarto.yml | 1 - community/blocks.qmd | 5 +++-- .../software-architecture/cross-cutting-concepts.qmd | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/_quarto.yml b/_quarto.yml index 5c6da95d..56b37bfe 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -53,7 +53,6 @@ website: contents: # Orders by using the `order:` key in the YAML header of the files - auto: design/software-architecture/ - - design/decisions.md - id: about title: "About" pinned: true diff --git a/community/blocks.qmd b/community/blocks.qmd index c6e1ac3d..f11f65f2 100644 --- a/community/blocks.qmd +++ b/community/blocks.qmd @@ -118,8 +118,9 @@ This support block contains explanations and justifications for why we choose the technologies, workflows, and designs that we do. This is not just for the Product itself, but also for the Documentation and Culture and Collaboration. Files and content related to this support block are -found in [Design Decisions](/design/decisions.md) (files in -`design/decisions/`) and [Contributing +found in the software architecture [Design +Decisions](/design/software-architecture/architecture-decisions.qmd) +(files in `design/software-architecture/decisions/`) and [Contributing Decisions](/community/CONTRIBUTING.md#decisions) (files in `community/decisions/`). 
diff --git a/design/software-architecture/cross-cutting-concepts.qmd b/design/software-architecture/cross-cutting-concepts.qmd index 3ec84622..fb7731fc 100644 --- a/design/software-architecture/cross-cutting-concepts.qmd +++ b/design/software-architecture/cross-cutting-concepts.qmd @@ -177,11 +177,11 @@ subsection](#permissions) below. ### Copyright and licensing Seedcase will have the MIT license (see the section on [Why choose a MIT -License](/design/decisions/why-license/) for more info). We will also be -using a Developer Certification of Origin (DCO) for anyone developing -and submitting code from outside the core project team. This will be -implemented using [GitHub App DCO](https://github.com/apps/dco). The -licence text itself is available +License](/design/software-architecture/decisions/why-license/) for more +info). We will also be using a Developer Certification of Origin (DCO) +for anyone developing and submitting code from outside the core project +team. This will be implemented using [GitHub App +DCO](https://github.com/apps/dco). The licence text itself is available [here](https://developercertificate.org). ### Compliance with data regulations