diff --git a/404.html b/404.html index 5f925b8..fc44c56 100644 --- a/404.html +++ b/404.html @@ -207,6 +207,10 @@ + + + + @@ -224,16 +228,24 @@ - - + + + + Overview - - + + + + + + + + @@ -243,6 +255,8 @@ + + diff --git a/examples/index.html b/examples/index.html index a41c0a3..a4663a9 100644 --- a/examples/index.html +++ b/examples/index.html @@ -216,6 +216,10 @@ + + + + @@ -233,16 +237,24 @@ - - + + + + Overview - - + + + + + + + + @@ -252,6 +264,8 @@ + + diff --git a/examples/short_notebooks/Landscape_View_Xenium/index.html b/examples/short_notebooks/Landscape_View_Xenium/index.html index a0dcb06..2e51f89 100644 --- a/examples/short_notebooks/Landscape_View_Xenium/index.html +++ b/examples/short_notebooks/Landscape_View_Xenium/index.html @@ -214,6 +214,10 @@ + + + + @@ -231,16 +235,24 @@ - - + + + + Overview - - + + + + + + + + @@ -250,6 +262,8 @@ + + diff --git a/gallery/gallery_xenium/index.html b/gallery/gallery_xenium/index.html index 21c166b..48307dc 100644 --- a/gallery/gallery_xenium/index.html +++ b/gallery/gallery_xenium/index.html @@ -212,6 +212,10 @@ + + + + @@ -229,16 +233,24 @@ - - + + + + Overview - - + + + + + + + + @@ -248,6 +260,8 @@ + + diff --git a/gallery/gallery_xenium_mouse_brain/index.html b/gallery/gallery_xenium_mouse_brain/index.html index 332ad1a..3ea4bfe 100644 --- a/gallery/gallery_xenium_mouse_brain/index.html +++ b/gallery/gallery_xenium_mouse_brain/index.html @@ -216,6 +216,10 @@ + + + + @@ -233,16 +237,24 @@ - - + + + + Overview - - + + + + + + + + @@ -252,6 +264,8 @@ + + diff --git a/gallery/gallery_xenium_multi/index.html b/gallery/gallery_xenium_multi/index.html index 76e533f..536e0c1 100644 --- a/gallery/gallery_xenium_multi/index.html +++ b/gallery/gallery_xenium_multi/index.html @@ -216,6 +216,10 @@ + + + + @@ -233,16 +237,24 @@ - - + + + + Overview - - + + + + + + + + @@ -252,6 +264,8 @@ + + diff --git a/gallery/gallery_xenium_skin_cancer/index.html b/gallery/gallery_xenium_skin_cancer/index.html index 45700ba..dae2cce 100644 --- a/gallery/gallery_xenium_skin_cancer/index.html +++ b/gallery/gallery_xenium_skin_cancer/index.html @@ -216,6 +216,10 @@ + + + + @@ -233,16 +237,24 @@ - - + + + + Overview - - + + + + + + + + @@ -252,6 +264,8 @@ + + diff --git a/gallery/index.html b/gallery/index.html index 017c050..400938e 100644 --- a/gallery/index.html +++ b/gallery/index.html @@ -216,6 +216,10 @@ + + + + @@ -233,16 +237,24 @@ - - + + + + Overview - - + + + + + + + + @@ -252,6 +264,8 @@ + + diff --git a/index.html b/index.html index e9b26ba..8015e20 100644 --- a/index.html +++ b/index.html @@ -10,7 +10,7 @@ - + @@ -263,6 +263,10 @@ + + + + @@ -280,16 +284,24 @@ - - + + + + Overview - - + + + + + + + + @@ -299,6 +311,8 @@ + + @@ -1016,7 +1030,7 @@ Welcome to Celldega's Documentation< Celldega Landscape visualization of a human skin cancer Xenium dataset obtained from 10X Genomics. -Celldega is a spatial analysis and visualization library that is being developed by the Spatial Technology Platform at the Broad Institute of MIT and Harvard. This project enables researchers to easily visualize large ST datasets (e.g., datasets with >100M transcripts) alongside single-cell and spatial analysis notebook workflows (e.g., integrating with sverse tools and utilizing novel spatial analysis approaches). +Celldega is a spatial analysis and visualization library that is being developed by the Spatial Technology Platform at the Broad Institute of MIT and Harvard. 
This project enables researchers to easily visualize large ST datasets (e.g., datasets with >100M transcripts) alongside single-cell and spatial analysis notebook workflows (e.g., sverse tools and novel spatial analysis approaches). Getting Started Installation diff --git a/index.md b/index.md index a215ca4..3d2a00d 100644 --- a/index.md +++ b/index.md @@ -8,7 +8,7 @@ Celldega Landscape visualization of a human skin cancer Xenium dataset obtained from 10X Genomics. -Celldega is a spatial analysis and visualization library that is being developed by the Spatial Technology Platform at the Broad Institute of MIT and Harvard. This project enables researchers to easily visualize large ST datasets (e.g., datasets with >100M transcripts) alongside single-cell and spatial analysis notebook workflows (e.g., integrating with sverse tools and utilizing novel spatial analysis approaches). +Celldega is a spatial analysis and visualization library that is being developed by the Spatial Technology Platform at the Broad Institute of MIT and Harvard. This project enables researchers to easily visualize large ST datasets (e.g., datasets with >100M transcripts) alongside single-cell and spatial analysis notebook workflows (e.g., sverse tools and novel spatial analysis approaches). diff --git a/javascript/api/index.html b/javascript/api/index.html index b1c274b..34aa408 100644 --- a/javascript/api/index.html +++ b/javascript/api/index.html @@ -216,6 +216,10 @@ + + + + @@ -233,16 +237,24 @@ - - + + + + Overview - - + + + + + + + + @@ -252,6 +264,8 @@ + + diff --git a/javascript/index.html b/javascript/index.html index a84ec4b..a416b32 100644 --- a/javascript/index.html +++ b/javascript/index.html @@ -216,6 +216,10 @@ + + + + @@ -233,16 +237,24 @@ - - + + + + Overview - - + + + + + + + + @@ -252,6 +264,8 @@ + + diff --git a/overview/file_formats/index.html b/overview/file_formats/index.html index dff6503..6f0675d 100644 --- a/overview/file_formats/index.html +++ b/overview/file_formats/index.html @@ -218,6 +218,10 @@ + + + + @@ -235,16 +239,24 @@ - - + + + + Overview - - + + + + + + + + @@ -254,6 +266,8 @@ + + diff --git a/overview/getting_started/index.html b/overview/getting_started/index.html index ffbc1c7..9becbf1 100644 --- a/overview/getting_started/index.html +++ b/overview/getting_started/index.html @@ -9,7 +9,7 @@ - + @@ -218,6 +218,10 @@ + + + + @@ -235,16 +239,24 @@ - - + + + + Overview - - + + + + + + + + @@ -254,6 +266,8 @@ + + diff --git a/overview/index.html b/overview/index.html index 1e17232..5748a5d 100644 --- a/overview/index.html +++ b/overview/index.html @@ -9,6 +9,10 @@ + + + + @@ -205,6 +209,8 @@ + + @@ -212,6 +218,10 @@ + + + + @@ -222,25 +232,33 @@ - + - + - - + + + + Overview - - + + + + + + + + - + Overview @@ -248,6 +266,8 @@ + + @@ -940,7 +960,7 @@ Overview -Celldega enables researchers to easily visualize large ST datasets (e.g., datasets with >100M transcripts) alongside single-cell and spatial analysis notebook workflows (e.g., integrating with sverse tools and utilizing novel spatial analysis approaches). +The Celldega library is being developed to help researchers easily visualize and analyze high-dimensional spatial-omics data in the context of a notebook workflow. Initial development has been focused on spatial transcriptomics visualization. Celldega can be used as a Python library in a Jupyter notebook environment or as a stand-alone JavaScript library for creating visualizations. 
Getting Started diff --git a/overview/index.md b/overview/index.md index 6621600..8c193e3 100644 --- a/overview/index.md +++ b/overview/index.md @@ -1,6 +1,6 @@ # Overview -Celldega enables researchers to easily visualize large ST datasets (e.g., datasets with >100M transcripts) alongside single-cell and spatial analysis notebook workflows (e.g., integrating with sverse tools and utilizing novel spatial analysis approaches). +The Celldega library is being developed to help researchers easily visualize and analyze high-dimensional spatial-omics data in the context of a notebook workflow. Initial development has been focused on spatial transcriptomics visualization. Celldega can be used as a Python library in a Jupyter notebook environment or as a stand-alone JavaScript library for creating visualizations. diff --git a/overview/installation/index.html b/overview/installation/index.html index 79bd23c..bf95495 100644 --- a/overview/installation/index.html +++ b/overview/installation/index.html @@ -218,6 +218,10 @@ + + + + @@ -235,16 +239,24 @@ - - + + + + Overview - - + + + + + + + + @@ -254,6 +266,8 @@ + + diff --git a/overview/usage/index.html b/overview/usage/index.html index 64fc0dd..b561463 100644 --- a/overview/usage/index.html +++ b/overview/usage/index.html @@ -218,6 +218,10 @@ + + + + @@ -235,16 +239,24 @@ - - + + + + Overview - - + + + + + + + + @@ -254,6 +266,8 @@ + + diff --git a/python/api/index.html b/python/api/index.html index f5f6789..c81902b 100644 --- a/python/api/index.html +++ b/python/api/index.html @@ -212,6 +212,10 @@ + + + + @@ -229,16 +233,24 @@ - - + + + + Overview - - + + + + + + + + @@ -248,6 +260,8 @@ + + diff --git a/python/index.html b/python/index.html index 9d7b97c..e135c9c 100644 --- a/python/index.html +++ b/python/index.html @@ -216,6 +216,10 @@ + + + + @@ -233,16 +237,24 @@ - - + + + + Overview - - + + + + + + + + @@ -252,6 +264,8 @@ + + @@ -954,21 +968,6 @@ - - - - - - - Submodules - - - - - - - - @@ -987,14 +986,10 @@ Python API Overview -Pre Module Overview +Pre Module Overview The pre module contains methods for pre-processing LandscapeFiles. -Viz Module Overview +Viz Module Overview The viz module contains functions and classes for data visualization. -Submodules - -widget: Widgets for visualizing spatial omics data. - diff --git a/python/index.md b/python/index.md index fe72908..f386f77 100644 --- a/python/index.md +++ b/python/index.md @@ -1,15 +1,10 @@ # Python API Overview -## Pre Module Overview +## [Pre Module Overview](pre/api) The `pre` module contains methods for pre-processing LandscapeFiles. -## Viz Module Overview +## [Viz Module Overview](viz/api) The `viz` module contains functions and classes for data visualization. - -### Submodules - -- `widget`: Widgets for visualizing spatial omics data. 
- diff --git a/python/pre/api/index.html b/python/pre/api/index.html index db9abcc..6e2ec99 100644 --- a/python/pre/api/index.html +++ b/python/pre/api/index.html @@ -216,6 +216,10 @@ + + + + @@ -233,16 +237,24 @@ - - + + + + Overview - - + + + + + + + + @@ -252,6 +264,8 @@ + + diff --git a/python/viz/api/index.html b/python/viz/api/index.html index 08b107a..613d9d4 100644 --- a/python/viz/api/index.html +++ b/python/viz/api/index.html @@ -216,6 +216,10 @@ + + + + @@ -233,16 +237,24 @@ - - + + + + Overview - - + + + + + + + + @@ -252,6 +264,8 @@ + + diff --git a/search/search_index.json b/search/search_index.json index e6e9f60..f1242fa 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Welcome to Celldega's Documentation","text":"Celldega Landscape visualization of a human skin cancer Xenium dataset obtained from 10X Genomics. Celldega is a spatial analysis and visualization library that is being developed by the Spatial Technology Platform at the Broad Institute of MIT and Harvard. This project enables researchers to easily visualize large ST datasets (e.g., datasets with >100M transcripts) alongside single-cell and spatial analysis notebook workflows (e.g., integrating with sverse tools and utilizing novel spatial analysis approaches). Getting Started Installation Usage "},{"location":"#about","title":"About","text":"Celldega is named after a Bodega, a small shop with all the essentials, that is part of the fabric of a neighborhood."},{"location":"examples/","title":"Jupyter Notebook Examples","text":"Landscape View Xenium"},{"location":"examples/short_notebooks/Landscape_View_Xenium/","title":"Landscape View Xenium","text":"In\u00a0[2]: Copied! # %load_ext autoreload\n# %autoreload 2\n# %env ANYWIDGET_HMR=1\n # %load_ext autoreload # %autoreload 2 # %env ANYWIDGET_HMR=1 In\u00a0[3]: Copied! import celldega as dega\ndega.__version__\n import celldega as dega dega.__version__ Out[3]: '0.0.0' In\u00a0[6]: Copied! from observable_jupyter import embed\n from observable_jupyter import embed In\u00a0[11]: Copied! base_url = 'https://raw.githubusercontent.com/broadinstitute/celldega_Xenium_Prime_Mouse_Brain_Coronal_FF_outs/main/Xenium_Prime_Mouse_Brain_Coronal_FF_outs'\n base_url = 'https://raw.githubusercontent.com/broadinstitute/celldega_Xenium_Prime_Mouse_Brain_Coronal_FF_outs/main/Xenium_Prime_Mouse_Brain_Coronal_FF_outs' In\u00a0[14]: Copied! embed('@cornhundred/celldega-landscape-ist', inputs={'base_url': base_url}, cells=['landscape_container'], display_logo=False)\n embed('@cornhundred/celldega-landscape-ist', inputs={'base_url': base_url}, cells=['landscape_container'], display_logo=False) In\u00a0[13]: Copied! # base_url = 'https://raw.githubusercontent.com/broadinstitute/celldega_Xenium_Prime_Human_Skin_FFPE_outs/main/Xenium_Prime_Human_Skin_FFPE_outs'\n\n# landscape_ist = dega.viz.Landscape(\n# technology='Xenium',\n# ini_zoom = -4.5,\n# ini_x=6000,\n# ini_y=8000,\n# base_url = base_url,\n\n# )\n\n# landscape_ist\n # base_url = 'https://raw.githubusercontent.com/broadinstitute/celldega_Xenium_Prime_Human_Skin_FFPE_outs/main/Xenium_Prime_Human_Skin_FFPE_outs' # landscape_ist = dega.viz.Landscape( # technology='Xenium', # ini_zoom = -4.5, # ini_x=6000, # ini_y=8000, # base_url = base_url, # ) # landscape_ist In\u00a0[\u00a0]: Copied! 
\n"},{"location":"examples/short_notebooks/Landscape_View_Xenium/#landscape-view-xenium","title":"Landscape View Xenium\u00b6","text":""},{"location":"gallery/","title":"Celldega Gallery","text":"This page includes links to visualizations that are made with the stand-alone Celldega JavaScript library."},{"location":"gallery/#imaging-spatial-transcriptomics","title":"Imaging Spatial Transcriptomics","text":""},{"location":"gallery/#xenium","title":"Xenium","text":" Xenium Mouse Brain Xenium Human Skin Cancer "},{"location":"gallery/#sequencing-spatial-transcriptomics","title":"Sequencing Spatial Transcriptomics","text":""},{"location":"gallery/#visium-hd","title":"Visium HD","text":""},{"location":"gallery/gallery_xenium/","title":"Celldega Xenium Gallery","text":""},{"location":"gallery/gallery_xenium/#xenium-prime-mouse-brain-coronal-ff","title":"Xenium Prime Mouse Brain Coronal FF","text":""},{"location":"gallery/gallery_xenium/#xenium-prime-human-skin-ffpe-outs","title":"Xenium Prime Human Skin FFPE outs","text":""},{"location":"gallery/gallery_xenium/#xenium-human-pancreas-ffpe","title":"Xenium Human Pancreas FFPE","text":""},{"location":"gallery/gallery_xenium/#bone-marrow","title":"Bone Marrow","text":""},{"location":"gallery/gallery_xenium_mouse_brain/","title":"Xenium Prime Mouse Brain Coronal FF","text":""},{"location":"gallery/gallery_xenium_multi/","title":"Xenium Multi Dataset","text":""},{"location":"gallery/gallery_xenium_multi/#xenium-prime-mouse-brain-coronal-ff","title":"Xenium Prime Mouse Brain Coronal FF","text":""},{"location":"gallery/gallery_xenium_multi/#xenium-prime-human-skin-ffpe-outs","title":"Xenium Prime Human Skin FFPE outs","text":""},{"location":"gallery/gallery_xenium_multi/#xenium-human-pancreas-ffpe","title":"Xenium Human Pancreas FFPE","text":""},{"location":"gallery/gallery_xenium_multi/#bone-marrow","title":"Bone Marrow","text":""},{"location":"gallery/gallery_xenium_skin_cancer/","title":"Xenium Prime Human Skin FFPE outs","text":""},{"location":"javascript/","title":"JavaScript API Overview","text":"Celldega's visualization methods can be used as a stand-alone JavaScript library outside the context of a Jupyter notebook. This can be used to create showcase visualizations with publicly hosted data."},{"location":"javascript/api/","title":"Celldega JavaScript API Documentation","text":"The JavaScript component of Celldega is used within the Jupyter Widgets framework to provide interactive visualization in the context of a Jupyter notebook but can also be used as a standalone JavaScript library."},{"location":"javascript/api/#landscape_ist-api-documentation","title":"landscape_ist API Documentation","text":"The landscape_ist function initializes and renders an interactive spatial transcriptomics (IST) landscape visualization. This API is designed to work with Deck.gl and includes customizable visualization options, dynamic data updates, and UI interactions."},{"location":"javascript/api/#parameters","title":"Parameters","text":" el (HTMLElement): The root DOM element where the visualization is rendered. ini_model (Object): The initial data model containing configuration and state. token (string): Authentication token for accessing data. ini_x, ini_y, ini_z (number): Initial spatial coordinates for the view. ini_zoom (number): Initial zoom level for the visualization. base_url (string): Base URL for accessing data files. dataset_name (string, optional): Name of the dataset being visualized. 
trx_radius (number, optional): Initial radius for transcript points. Default: 0.25. width (number|string, optional): Width of the visualization. Default: 100%. height (number, optional): Height of the visualization. Default: 800. view_change_custom_callback (Function, optional): Custom callback triggered on view changes. "},{"location":"javascript/api/#public-api","title":"Public API","text":"The landscape_ist function returns an object (landscape) with several methods for interacting with the visualization."},{"location":"javascript/api/#update_matrix_gene","title":"update_matrix_gene","text":"Updates the visualization to highlight data for a specific gene."},{"location":"javascript/api/#parameters_1","title":"Parameters","text":" inst_gene (string): The gene to highlight. "},{"location":"javascript/api/#behavior","title":"Behavior","text":" Updates the transcript layer to show data for the specified gene. Scrolls the bar graph to bring the selected gene into view. Toggles visibility of image layers and controls based on the selected gene. "},{"location":"javascript/api/#update_matrix_col","title":"update_matrix_col","text":"Updates the visualization to highlight data for a specific column (e.g., cluster)."},{"location":"javascript/api/#parameters_2","title":"Parameters","text":" inst_col (string): The column to highlight. "},{"location":"javascript/api/#behavior_1","title":"Behavior","text":" Highlights the bar graph corresponding to the selected column. Updates cell and path layers to reflect the selected column. Toggles visibility of layers based on the column selection. "},{"location":"javascript/api/#update_matrix_dendro_col","title":"update_matrix_dendro_col","text":"Updates the visualization based on a dendrogram selection of columns."},{"location":"javascript/api/#parameters_3","title":"Parameters","text":" selected_cols (Array<string>): The list of selected column names. "},{"location":"javascript/api/#behavior_2","title":"Behavior","text":" Highlights the selected columns in the bar graph. Updates layers to reflect the selection. "},{"location":"javascript/api/#update_view_state","title":"update_view_state","text":"Updates the view state of the Deck.gl visualization."},{"location":"javascript/api/#parameters_4","title":"Parameters","text":" new_view_state (Object): The new view state configuration. close_up (boolean): Whether the view should zoom in closely. trx_layer (Object): The transcript layer to update. "},{"location":"javascript/api/#behavior_3","title":"Behavior","text":" Adjusts the viewport and reconfigures layers based on the new view state. "},{"location":"javascript/api/#update_layers","title":"update_layers","text":"Updates all visualization layers."},{"location":"javascript/api/#behavior_4","title":"Behavior","text":" Refreshes the Deck.gl layers with the current visualization state. "},{"location":"javascript/api/#finalize","title":"finalize","text":"Finalizes the Deck.gl instance and cleans up resources."},{"location":"javascript/api/#behavior_5","title":"Behavior","text":" Disposes of all Deck.gl resources and event listeners to prevent memory leaks. 
"},{"location":"javascript/api/#usage-example","title":"Usage Example","text":"\njavascript\nimport { landscape_ist } from 'path/to/landscape_ist';\n\nconst rootElement = document.getElementById('visualization-container');\nconst model = { /* Model containing visualization data */ };\n\nconst visualization = await landscape_ist(\n rootElement,\n model,\n 'example-token',\n 100,\n 200,\n 0,\n -5,\n 'https://example.com/data',\n 'Example Dataset'\n);\n\n// Update the visualization with a specific gene.\nvisualization.update_matrix_gene('TP53');\n\n// Update the visualization with a specific column.\nvisualization.update_matrix_col('Cluster 1');\n\n// Finalize the visualization when done.\nvisualization.finalize();\n\n"},{"location":"javascript/api/#matrix_viz-api-documentation","title":"matrix_viz API Documentation","text":"The matrix_viz function initializes and renders a matrix visualization. This API is built using approaches and code adaptations from the Clustergrammer-GL library, and it integrates tightly with Deck.gl to provide interactive and dynamic visualizations."},{"location":"javascript/api/#parameters_5","title":"Parameters","text":" model (Object): The model object containing configuration data for the visualization. el (HTMLElement): The root DOM element where the visualization is rendered. network (Object): The network object containing the matrix data to visualize. width (string|number, optional): The width of the visualization. Default: '800'. height (string|number, optional): The height of the visualization. Default: '800'. row_label_callback (Function, optional): A callback function triggered on row label interactions. col_label_callback (Function, optional): A callback function triggered on column label interactions. col_dendro_callback (Function, optional): A callback function triggered on dendrogram column interactions. "},{"location":"javascript/api/#internal-behavior","title":"Internal Behavior","text":"The function performs the following setup: 1. Deck.gl Integration: - Initializes a Deck.gl instance for the matrix visualization. - Sets properties for interactivity, including tooltips, view state changes, and layer filtering. Matrix Data Setup: Parses and structures the matrix data from the network object. Configures labels, categories, and dendrograms for both rows and columns. Layer Initialization: Creates layers for: Matrix cells. Row and column labels. Row and column categories. Row and column dendrograms. Attaches interactions (e.g., click events) to these layers. UI Setup: Creates a container for the visualization and appends it to the root DOM element. 
"},{"location":"javascript/api/#example-usage","title":"Example Usage","text":"import { matrix_viz } from 'path/to/matrix_viz';\n\nconst rootElement = document.getElementById('matrix-container');\nconst model = { /* Model containing visualization data */ };\nconst network = { /* Network object representing the matrix data */ };\n\n// Callback functions\nconst rowLabelCallback = (row) => {\n console.log('Row label clicked:', row);\n};\n\nconst colLabelCallback = (col) => {\n console.log('Column label clicked:', col);\n};\n\nconst colDendroCallback = (dendro) => {\n console.log('Column dendrogram clicked:', dendro);\n};\n\n// Initialize the matrix visualization\nawait matrix_viz(\n model,\n rootElement,\n network,\n 800,\n 800,\n rowLabelCallback,\n colLabelCallback,\n colDendroCallback\n);\n"},{"location":"overview/","title":"Overview","text":"Celldega enables researchers to easily visualize large ST datasets (e.g., datasets with >100M transcripts) alongside single-cell and spatial analysis notebook workflows (e.g., integrating with sverse tools and utilizing novel spatial analysis approaches). Celldega can be used as a Python library in a Jupyter notebook environment or as a stand-alone JavaScript library for creating visualizations. Getting Started Installation Usage "},{"location":"overview/file_formats/","title":"File Formats","text":""},{"location":"overview/file_formats/#landscapefiles","title":"LandscapeFiles","text":""},{"location":"overview/getting_started/","title":"Getting Started","text":"Celldega is a spatial analysis and visualization library that is being developed by the Spatial Technology Platform at the Broad Institute of MIT and Harvard. Celldega can be used as a Jupyter Widget in Python as well as a stand-alone JavaScript library. Please see examples notebooks below to try out Celldega in a Jupyter notebook or ObservableHQ JavaScript notebook: Celldega_Xenium_Landscape_Visualizations_Colab.ipynb Celldega Landscape Xenium ObservableHQ "},{"location":"overview/installation/","title":"Installation","text":""},{"location":"overview/installation/#python","title":"Python","text":"The Celldega library can be installed using pip # install Celldega (without vips for visualization pre-processing)\npip install celldega\n\n# install Celldega with optional pre-processing requirements\npip install celldega[pre]\n\n"},{"location":"overview/installation/#javascript","title":"JavaScript","text":"Celldega can be used in a JavaScript environment such as ObservableHQ by importing it as a module celldega = import('https://unpkg.com/celldega@' + version + '/src/celldega/static/widget.js?module')\n"},{"location":"overview/usage/","title":"Celldega Usage","text":""},{"location":"overview/usage/#terrabio","title":"Terra.bio","text":"** Coming soon **"},{"location":"python/","title":"Python API Overview","text":""},{"location":"python/#pre-module-overview","title":"Pre Module Overview","text":"The pre module contains methods for pre-processing LandscapeFiles."},{"location":"python/#viz-module-overview","title":"Viz Module Overview","text":"The viz module contains functions and classes for data visualization."},{"location":"python/#submodules","title":"Submodules","text":" widget: Widgets for visualizing spatial omics data. "},{"location":"python/api/","title":"Python API Reference","text":"Module for pre-processing to generate LandscapeFiles from ST data. 
Module for visualization"},{"location":"python/api/#celldega.pre.convert_long_id_to_short","title":"convert_long_id_to_short(df)","text":"Converts a column of long integer cell IDs in a DataFrame to a shorter, hash-based representation. Parameters: Name Type Description Default df DataFrame The DataFrame containing the EntityID. required Returns: pd.DataFrame: The original DataFrame with an additional column named cell_id containing the shortened cell IDs. The function applies a SHA-256 hash to each cell ID, encodes the hash using base64, and truncates it to create a shorter identifier that is added as a new column to the DataFrame. Source code in src/celldega/pre/__init__.py def convert_long_id_to_short(df):\n \"\"\"\n Converts a column of long integer cell IDs in a DataFrame to a shorter, hash-based representation.\n\n Args:\n df (pd.DataFrame): The DataFrame containing the EntityID.\n Returns:\n pd.DataFrame: The original DataFrame with an additional column named `cell_id`\n containing the shortened cell IDs.\n\n The function applies a SHA-256 hash to each cell ID, encodes the hash using base64, and truncates\n it to create a shorter identifier that is added as a new column to the DataFrame.\n \"\"\"\n # Function to hash and encode the cell ID\n def hash_and_shorten_id(cell_id):\n # Create a hash of the cell ID\n cell_id_bytes = str(cell_id).encode('utf-8')\n hash_object = hashlib.sha256(cell_id_bytes)\n hash_digest = hash_object.digest()\n\n # Encode the hash to a base64 string to mix letters and numbers, truncate to 9 characters\n short_id = base64.urlsafe_b64encode(hash_digest).decode('utf-8')[:9]\n return short_id\n\n # Apply the hash_and_shorten_id function to each cell ID in the specified column\n df['cell_id'] = df['EntityID'].apply(hash_and_shorten_id)\n\n return df\n"},{"location":"python/api/#celldega.pre.convert_to_jpeg","title":"convert_to_jpeg(image_path, quality=80)","text":"Convert a TIFF image to a JPEG image with a quality of score"},{"location":"python/api/#celldega.pre.convert_to_jpeg--parameters","title":"Parameters","text":"image_path : str Path to the image file quality : int (default=80) Quality score for the JPEG image"},{"location":"python/api/#celldega.pre.convert_to_jpeg--returns","title":"Returns","text":"new_image_path : str Path to the JPEG image file Source code in src/celldega/pre/__init__.py def convert_to_jpeg(image_path, quality=80):\n \"\"\"\n Convert a TIFF image to a JPEG image with a quality of score\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=80)\n Quality score for the JPEG image\n\n Returns\n -------\n new_image_path : str\n Path to the JPEG image file\n\n \"\"\"\n\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a JPEG with a quality of 80\n new_image_path = image_path.replace(\".tif\", \".jpeg\")\n image.jpegsave(new_image_path, Q=quality)\n\n return new_image_path\n"},{"location":"python/api/#celldega.pre.convert_to_png","title":"convert_to_png(image_path)","text":"Convert a TIFF image to a JPEG image with a quality of score"},{"location":"python/api/#celldega.pre.convert_to_png--parameters","title":"Parameters","text":"image_path : str Path to the image file quality : int (default=80) Quality score for the JPEG image"},{"location":"python/api/#celldega.pre.convert_to_png--returns","title":"Returns","text":"new_image_path : str Path to the JPEG image file Source code in src/celldega/pre/__init__.py def 
convert_to_png(image_path):\n \"\"\"\n Convert a TIFF image to a JPEG image with a quality of score\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=80)\n Quality score for the JPEG image\n\n Returns\n -------\n new_image_path : str\n Path to the JPEG image file\n\n \"\"\"\n\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a JPEG with a quality of 80\n new_image_path = image_path.replace(\".tif\", \".png\")\n image.pngsave(new_image_path)\n\n return new_image_path\n"},{"location":"python/api/#celldega.pre.convert_to_webp","title":"convert_to_webp(image_path, quality=100)","text":"Convert a TIFF image to a WEBP image with a specified quality score."},{"location":"python/api/#celldega.pre.convert_to_webp--parameters","title":"Parameters","text":"image_path : str Path to the image file quality : int (default=100) Quality score for the WEBP image (higher is better quality)"},{"location":"python/api/#celldega.pre.convert_to_webp--returns","title":"Returns","text":"new_image_path : str Path to the WEBP image file Source code in src/celldega/pre/__init__.py def convert_to_webp(image_path, quality=100):\n \"\"\"\n Convert a TIFF image to a WEBP image with a specified quality score.\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=100)\n Quality score for the WEBP image (higher is better quality)\n\n Returns\n -------\n new_image_path : str\n Path to the WEBP image file\n \"\"\"\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a WEBP with specified quality\n new_image_path = image_path.replace(\".tif\", \".webp\")\n image.webpsave(new_image_path, Q=quality)\n\n return new_image_path\n"},{"location":"python/api/#celldega.pre.get_max_zoom_level","title":"get_max_zoom_level(path_image_pyramid)","text":"Returns the maximum zoom level based on the highest-numbered directory in the specified path_image_pyramid. Parameters: Name Type Description Default path_image_pyramid str The path to the directory containing zoom level directories. required Returns: Name Type Description max_pyramid_zoom int The maximum zoom level. Source code in src/celldega/pre/__init__.py def get_max_zoom_level(path_image_pyramid):\n \"\"\"\n Returns the maximum zoom level based on the highest-numbered directory\n in the specified path_image_pyramid.\n\n Parameters:\n path_image_pyramid (str): The path to the directory containing zoom level directories.\n\n Returns:\n max_pyramid_zoom (int): The maximum zoom level.\n \"\"\"\n # List all entries in the path_image_pyramid that are directories and can be converted to integers\n zoom_levels = [\n entry\n for entry in os.listdir(path_image_pyramid)\n if os.path.isdir(os.path.join(path_image_pyramid, entry)) and entry.isdigit()\n ]\n\n # Convert to integer and find the maximum value\n max_pyramid_zoom = max(map(int, zoom_levels)) if zoom_levels else None\n\n return max_pyramid_zoom\n"},{"location":"python/api/#celldega.pre.make_cell_boundary_tiles","title":"make_cell_boundary_tiles(technology, path_cell_boundaries, path_meta_cell_micron, path_transformation_matrix, path_output, coarse_tile_factor=20, tile_size=250, tile_bounds=None, image_scale=1, max_workers=8)","text":"Processes cell boundary data and divides it into spatial tiles based on the provided technology. 
Reads cell boundary data, applies affine transformations, and divides the data into coarse and fine tiles. The resulting tiles are saved as Parquet files, each containing the geometries of cells in that tile."},{"location":"python/api/#celldega.pre.make_cell_boundary_tiles--parameters","title":"Parameters","text":"technology : str The technology used to generate the cell boundary data, e.g., \"MERSCOPE\", \"Xenium\", or \"custom\". path_cell_boundaries : str Path to the file containing the cell boundaries (Parquet format). path_meta_cell_micron : str Path to the file containing cell metadata (CSV format). path_transformation_matrix : str Path to the file containing the transformation matrix (CSV format). path_output : str Directory path where the output files (Parquet files) for each tile will be saved. coarse_tile_factor : int, optional, default=20. scaling factor of each coarse-grain tile comparing to the fine tile size. tile_size : int, optional, default=500 Size of each fine-grain tile in microns. tile_bounds : dict, optional Dictionary containing the minimum and maximum bounds for x and y coordinates. image_scale : float, optional, default=1 Scale factor to apply to the geometry data. max_workers : int, optional, default=8 Maximum number of parallel workers for processing tiles."},{"location":"python/api/#celldega.pre.make_cell_boundary_tiles--returns","title":"Returns","text":"None Source code in src/celldega/pre/boundary_tile.py def make_cell_boundary_tiles(\n technology,\n path_cell_boundaries,\n path_meta_cell_micron,\n path_transformation_matrix,\n path_output,\n coarse_tile_factor=20,\n tile_size=250,\n tile_bounds=None,\n image_scale=1,\n max_workers=8\n):\n\n\n \"\"\"\n Processes cell boundary data and divides it into spatial tiles based on the provided technology.\n Reads cell boundary data, applies affine transformations, and divides the data into coarse and fine tiles.\n The resulting tiles are saved as Parquet files, each containing the geometries of cells in that tile.\n\n Parameters\n ----------\n technology : str\n The technology used to generate the cell boundary data, e.g., \"MERSCOPE\", \"Xenium\", or \"custom\".\n path_cell_boundaries : str\n Path to the file containing the cell boundaries (Parquet format).\n path_meta_cell_micron : str\n Path to the file containing cell metadata (CSV format).\n path_transformation_matrix : str\n Path to the file containing the transformation matrix (CSV format).\n path_output : str\n Directory path where the output files (Parquet files) for each tile will be saved.\n coarse_tile_factor : int, optional, default=20.\n scaling factor of each coarse-grain tile comparing to the fine tile size.\n tile_size : int, optional, default=500\n Size of each fine-grain tile in microns.\n tile_bounds : dict, optional\n Dictionary containing the minimum and maximum bounds for x and y coordinates.\n image_scale : float, optional, default=1\n Scale factor to apply to the geometry data.\n max_workers : int, optional, default=8\n Maximum number of parallel workers for processing tiles.\n\n Returns\n -------\n None\n \"\"\"\n\n def numpy_affine_transform(coords, matrix):\n \"\"\"Apply affine transformation to numpy coordinates.\"\"\"\n # Homogeneous coordinates for affine transformation\n coords = np.hstack([coords, np.ones((coords.shape[0], 1))])\n transformed_coords = coords @ matrix.T\n return transformed_coords[:, :2] # Drop the homogeneous coordinate\n\n def batch_transform_geometries(geometries, transformation_matrix, scale):\n \"\"\"\n Batch 
transform geometries using numpy for optimized performance.\n \"\"\"\n # Extract affine transformation parameters into a 3x3 matrix for numpy\n affine_matrix = np.array([\n [transformation_matrix[0, 0], transformation_matrix[0, 1], transformation_matrix[0, 2]],\n [transformation_matrix[1, 0], transformation_matrix[1, 1], transformation_matrix[1, 2]],\n [0, 0, 1]\n ])\n\n transformed_geometries = []\n\n for polygon in geometries:\n # Extract coordinates and transform them\n if isinstance(polygon, MultiPolygon):\n polygon = next(polygon.geoms) # Use the first geometry\n\n # Transform the exterior of the polygon\n exterior_coords = np.array(polygon.exterior.coords)\n\n # Apply the affine transformation and scale\n transformed_coords = numpy_affine_transform(exterior_coords, affine_matrix) / scale\n\n # Append the result to the transformed_geometries list\n transformed_geometries.append([transformed_coords.tolist()])\n\n return transformed_geometries\n\n\n def filter_and_save_fine_boundary(coarse_tile, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_output):\n cell_ids = coarse_tile.index.values\n\n tile_filter = (\n (coarse_tile[\"center_x\"] >= fine_tile_x_min) & (coarse_tile[\"center_x\"] < fine_tile_x_max) &\n (coarse_tile[\"center_y\"] >= fine_tile_y_min) & (coarse_tile[\"center_y\"] < fine_tile_y_max)\n )\n filtered_indices = np.where(tile_filter)[0]\n\n keep_cells = cell_ids[filtered_indices]\n fine_tile_cells = coarse_tile.loc[keep_cells, [\"GEOMETRY\"]]\n fine_tile_cells = fine_tile_cells.assign(name=fine_tile_cells.index)\n\n if not fine_tile_cells.empty:\n filename = f\"{path_output}/cell_tile_{fine_i}_{fine_j}.parquet\"\n fine_tile_cells.to_parquet(filename)\n\n def process_fine_boundaries(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_output, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y):\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = []\n for fine_i in range(n_fine_tiles_x):\n fine_tile_x_min = x_min + fine_i * tile_size\n fine_tile_x_max = fine_tile_x_min + tile_size\n\n if not (fine_tile_x_min >= coarse_tile_x_min and fine_tile_x_max <= coarse_tile_x_max):\n continue\n\n for fine_j in range(n_fine_tiles_y):\n fine_tile_y_min = y_min + fine_j * tile_size\n fine_tile_y_max = fine_tile_y_min + tile_size\n\n if not (fine_tile_y_min >= coarse_tile_y_min and fine_tile_y_max <= coarse_tile_y_max):\n continue\n\n futures.append(executor.submit(\n filter_and_save_fine_boundary, coarse_tile, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_output\n ))\n\n for future in futures:\n future.result()\n\n tile_size_x = tile_size\n tile_size_y = tile_size\n\n transformation_matrix = pd.read_csv(path_transformation_matrix, header=None, sep=\" \").values\n\n # Load cell boundary data based on the technology\n if technology == \"MERSCOPE\":\n df_meta = pd.read_parquet(f\"{path_output.replace('cell_segmentation','cell_metadata.parquet')}\")\n entity_to_cell_id_dict = pd.Series(df_meta.index.values, index=df_meta.EntityID).to_dict()\n cells_orig = gpd.read_parquet(path_cell_boundaries)\n cells_orig['cell_id'] = cells_orig['EntityID'].map(entity_to_cell_id_dict)\n cells_orig = cells_orig[cells_orig[\"ZIndex\"] == 1]\n\n # Correct cell_id issues with meta_cell\n meta_cell = pd.read_csv(path_meta_cell_micron)\n meta_cell['cell_id'] = meta_cell['EntityID'].map(entity_to_cell_id_dict)\n cells_orig.index = 
meta_cell[meta_cell[\"cell_id\"].isin(cells_orig['cell_id'])].index\n\n # Correct 'MultiPolygon' to 'Polygon'\n cells_orig[\"geometry\"] = cells_orig[\"Geometry\"].apply(\n lambda x: list(x.geoms)[0] if isinstance(x, MultiPolygon) else x\n )\n\n cells_orig.set_index('cell_id', inplace=True)\n\n elif technology == \"Xenium\":\n xenium_cells = pd.read_parquet(path_cell_boundaries)\n grouped = xenium_cells.groupby(\"cell_id\")[[\"vertex_x\", \"vertex_y\"]].agg(lambda x: x.tolist())\n grouped[\"geometry\"] = grouped.apply(lambda row: Polygon(zip(row[\"vertex_x\"], row[\"vertex_y\"])), axis=1)\n cells_orig = gpd.GeoDataFrame(grouped, geometry=\"geometry\")[[\"geometry\"]]\n\n elif technology == \"custom\":\n cells_orig = gpd.read_parquet(path_cell_boundaries)\n\n # Transform geometries\n cells_orig[\"GEOMETRY\"] = batch_transform_geometries(cells_orig[\"geometry\"], transformation_matrix, image_scale)\n\n # Convert transformed geometries to polygons and calculate centroids\n cells_orig[\"polygon\"] = cells_orig[\"GEOMETRY\"].apply(lambda x: Polygon(x[0]))\n gdf_cells = gpd.GeoDataFrame(geometry=cells_orig[\"polygon\"])\n gdf_cells[\"center_x\"] = gdf_cells.geometry.centroid.x\n gdf_cells[\"center_y\"] = gdf_cells.geometry.centroid.y\n gdf_cells[\"GEOMETRY\"] = cells_orig[\"GEOMETRY\"]\n\n # Ensure the output directory exists\n if not os.path.exists(path_output):\n os.makedirs(path_output)\n\n # Calculate tile bounds and fine/coarse tiles\n x_min, x_max = tile_bounds[\"x_min\"], tile_bounds[\"x_max\"]\n y_min, y_max = tile_bounds[\"y_min\"], tile_bounds[\"y_max\"]\n n_fine_tiles_x = int(np.ceil((x_max - x_min) / tile_size))\n n_fine_tiles_y = int(np.ceil((y_max - y_min) / tile_size))\n n_coarse_tiles_x = int(np.ceil((x_max - x_min) / (coarse_tile_factor * tile_size)))\n n_coarse_tiles_y = int(np.ceil((y_max - y_min) / (coarse_tile_factor * tile_size)))\n\n # Process coarse tiles in parallel\n for i in tqdm(range(n_coarse_tiles_x), desc=\"Processing coarse tiles\"):\n coarse_tile_x_min = x_min + i * (coarse_tile_factor * tile_size)\n coarse_tile_x_max = coarse_tile_x_min + (coarse_tile_factor * tile_size)\n\n for j in range(n_coarse_tiles_y):\n coarse_tile_y_min = y_min + j * (coarse_tile_factor * tile_size)\n coarse_tile_y_max = coarse_tile_y_min + (coarse_tile_factor * tile_size)\n\n coarse_tile = gdf_cells[\n (gdf_cells[\"center_x\"] >= coarse_tile_x_min) & (gdf_cells[\"center_x\"] < coarse_tile_x_max) &\n (gdf_cells[\"center_y\"] >= coarse_tile_y_min) & (gdf_cells[\"center_y\"] < coarse_tile_y_max)\n ]\n if not coarse_tile.empty:\n process_fine_boundaries(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_output, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y)\n"},{"location":"python/api/#celldega.pre.make_deepzoom_pyramid","title":"make_deepzoom_pyramid(image_path, output_path, pyramid_name, tile_size=512, overlap=0, suffix='.jpeg')","text":"Create a DeepZoom image pyramid from a JPEG image"},{"location":"python/api/#celldega.pre.make_deepzoom_pyramid--parameters","title":"Parameters","text":"image_path : str Path to the JPEG image file tile_size : int (default=512) Tile size for the DeepZoom pyramid overlap : int (default=0) Overlap size for the DeepZoom pyramid suffix : str (default='jpeg') Suffix for the DeepZoom pyramid tiles"},{"location":"python/api/#celldega.pre.make_deepzoom_pyramid--returns","title":"Returns","text":"None Source code in src/celldega/pre/__init__.py def make_deepzoom_pyramid(\n image_path, output_path, 
pyramid_name, tile_size=512, overlap=0, suffix=\".jpeg\"\n):\n \"\"\"\n Create a DeepZoom image pyramid from a JPEG image\n\n Parameters\n ----------\n image_path : str\n Path to the JPEG image file\n tile_size : int (default=512)\n Tile size for the DeepZoom pyramid\n overlap : int (default=0)\n Overlap size for the DeepZoom pyramid\n suffix : str (default='jpeg')\n Suffix for the DeepZoom pyramid tiles\n\n Returns\n -------\n None\n\n \"\"\"\n\n # Define the output path\n output_path = Path(output_path)\n\n # Load the JPEG image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # check if the output path exists and create it if it does not\n output_path.mkdir(parents=True, exist_ok=True)\n\n # append the pyramid name to the output path\n output_path = output_path / pyramid_name\n\n # Save the image as a DeepZoom image pyramid\n image.dzsave(output_path, tile_size=tile_size, overlap=overlap, suffix=suffix)\n"},{"location":"python/api/#celldega.pre.make_meta_cell_image_coord","title":"make_meta_cell_image_coord(technology, path_transformation_matrix, path_meta_cell_micron, path_meta_cell_image, image_scale)","text":"Apply an affine transformation to the cell coordinates in microns and save the transformed coordinates in pixels"},{"location":"python/api/#celldega.pre.make_meta_cell_image_coord--parameters","title":"Parameters","text":"technology : str The technology used to generate the data, Xenium and MERSCOPE are supported. path_transformation_matrix : str Path to the transformation matrix file path_meta_cell_micron : str Path to the meta cell file with coordinates in microns path_meta_cell_image : str Path to save the meta cell file with coordinates in pixels"},{"location":"python/api/#celldega.pre.make_meta_cell_image_coord--returns","title":"Returns","text":"None"},{"location":"python/api/#celldega.pre.make_meta_cell_image_coord--examples","title":"Examples","text":"make_meta_cell_image_coord( ... technology='Xenium', ... path_transformation_matrix='data/transformation_matrix.txt', ... path_meta_cell_micron='data/meta_cell_micron.csv', ... path_meta_cell_image='data/meta_cell_image.parquet' ... ) Source code in src/celldega/pre/__init__.py def make_meta_cell_image_coord(\n technology,\n path_transformation_matrix,\n path_meta_cell_micron,\n path_meta_cell_image,\n image_scale\n):\n \"\"\"\n Apply an affine transformation to the cell coordinates in microns and save\n the transformed coordinates in pixels\n\n Parameters\n ----------\n technology : str\n The technology used to generate the data, Xenium and MERSCOPE are supported.\n path_transformation_matrix : str\n Path to the transformation matrix file\n path_meta_cell_micron : str\n Path to the meta cell file with coordinates in microns\n path_meta_cell_image : str\n Path to save the meta cell file with coordinates in pixels\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> make_meta_cell_image_coord(\n ... technology='Xenium',\n ... path_transformation_matrix='data/transformation_matrix.txt',\n ... path_meta_cell_micron='data/meta_cell_micron.csv',\n ... path_meta_cell_image='data/meta_cell_image.parquet'\n ... 
)\n\n \"\"\"\n\n transformation_matrix = pd.read_csv(\n path_transformation_matrix, header=None, sep=\" \"\n ).values\n\n if technology == \"MERSCOPE\":\n meta_cell = pd.read_csv(path_meta_cell_micron, usecols=[\"EntityID\", \"center_x\", \"center_y\"])\n meta_cell = convert_long_id_to_short(meta_cell)\n meta_cell[\"name\"] = meta_cell[\"cell_id\"]\n meta_cell = meta_cell.set_index('cell_id')\n elif technology == \"Xenium\":\n usecols = [\"cell_id\", \"x_centroid\", \"y_centroid\"]\n meta_cell = pd.read_csv(path_meta_cell_micron, index_col=0, usecols=usecols)\n meta_cell.columns = [\"center_x\", \"center_y\"]\n meta_cell[\"name\"] = pd.Series(meta_cell.index, index=meta_cell.index)\n\n # Adding a ones column to accommodate for affine transformation\n meta_cell[\"ones\"] = 1\n\n # Preparing the data for matrix multiplication\n points = meta_cell[[\"center_x\", \"center_y\", \"ones\"]].values\n\n # Applying the transformation matrix\n transformed_points = np.dot(transformation_matrix, points.T).T\n\n # Updating the DataFrame with transformed coordinates\n meta_cell[\"center_x\"] = transformed_points[:, 0]\n meta_cell[\"center_y\"] = transformed_points[:, 1]\n\n # Dropping the ones column as it's no longer needed\n meta_cell.drop(columns=[\"ones\"], inplace=True)\n\n meta_cell[\"center_x\"] = meta_cell[\"center_x\"] / image_scale\n meta_cell[\"center_y\"] = meta_cell[\"center_y\"] / image_scale\n\n meta_cell[\"geometry\"] = meta_cell.apply(\n lambda row: [row[\"center_x\"], row[\"center_y\"]], axis=1\n )\n\n if technology == \"MERSCOPE\":\n meta_cell = meta_cell[[\"name\", \"geometry\", \"EntityID\"]]\n else:\n meta_cell = meta_cell[[\"name\", \"geometry\"]]\n\n\n meta_cell.to_parquet(path_meta_cell_image)\n"},{"location":"python/api/#celldega.pre.make_meta_gene","title":"make_meta_gene(technology, path_cbg, path_output)","text":"Create a DataFrame with genes and their assigned colors"},{"location":"python/api/#celldega.pre.make_meta_gene--parameters","title":"Parameters","text":"technology : str The technology used to generate the data, Xenium and MERSCOPE are supported. path_cbg : str Path to the cell-by-gene matrix data (the data format can vary based on technology) path_output : str Path to save the meta gene file"},{"location":"python/api/#celldega.pre.make_meta_gene--returns","title":"Returns","text":"None"},{"location":"python/api/#celldega.pre.make_meta_gene--examples","title":"Examples","text":"make_meta_gene( ... technology='Xenium', ... path_cbg='data/', ... path_output='data/meta_gene.parquet' ... ) Source code in src/celldega/pre/__init__.py def make_meta_gene(technology, path_cbg, path_output):\n \"\"\"\n Create a DataFrame with genes and their assigned colors\n\n Parameters\n ----------\n technology : str\n The technology used to generate the data, Xenium and MERSCOPE are supported.\n path_cbg : str\n Path to the cell-by-gene matrix data (the data format can vary based on technology)\n path_output : str\n Path to save the meta gene file\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> make_meta_gene(\n ... technology='Xenium',\n ... path_cbg='data/',\n ... path_output='data/meta_gene.parquet'\n ... 
)\n \"\"\"\n\n if technology == \"MERSCOPE\":\n cbg = pd.read_csv(path_cbg, index_col=0)\n genes = cbg.columns.tolist()\n elif technology == \"Xenium\":\n # genes = pd.read_csv(path_cbg + 'features.tsv.gz', sep='\\t', header=None)[1].values.tolist()\n cbg = read_cbg_mtx(path_cbg)\n genes = cbg.columns.tolist()\n\n # Get all categorical color palettes from Matplotlib and flatten them into a single list of colors\n palettes = [plt.get_cmap(name).colors for name in plt.colormaps() if \"tab\" in name]\n flat_colors = [color for palette in palettes for color in palette]\n\n # Convert RGB tuples to hex codes\n flat_colors_hex = [to_hex(color) for color in flat_colors]\n\n # Use modular arithmetic to assign a color to each gene, white for genes with \"Blank\"\n colors = [\n flat_colors_hex[i % len(flat_colors_hex)] if \"Blank\" not in gene else \"#FFFFFF\"\n for i, gene in enumerate(genes)\n ]\n\n # Create a DataFrame with genes and their assigned colors\n ser_color = pd.Series(colors, index=genes)\n\n # calculate gene expression metadata\n meta_gene = calc_meta_gene_data(cbg)\n meta_gene['color'] = ser_color\n\n # Identify sparse columns\n sparse_cols = [col for col in meta_gene.columns if pd.api.types.is_sparse(meta_gene[col])]\n\n # Convert sparse columns to dense\n for col in sparse_cols:\n meta_gene[col] = meta_gene[col].sparse.to_dense()\n\n meta_gene.to_parquet(path_output)\n"},{"location":"python/api/#celldega.pre.make_trx_tiles","title":"make_trx_tiles(technology, path_trx, path_transformation_matrix, path_trx_tiles, coarse_tile_factor=10, tile_size=250, chunk_size=1000000, verbose=False, image_scale=1, max_workers=8)","text":"Processes transcript data by dividing it into coarse-grain and fine-grain tiles, applying transformations, and saving the results in a parallelized manner."},{"location":"python/api/#celldega.pre.make_trx_tiles--parameters","title":"Parameters","text":"technology : str The technology used for generating the transcript data (e.g., \"MERSCOPE\" or \"Xenium\"). path_trx : str Path to the file containing the transcript data. path_transformation_matrix : str Path to the file containing the transformation matrix (CSV file). path_trx_tiles : str Directory path where the output files (Parquet files) for each tile will be saved. coarse_tile_factor : int, optional Scaling factor of each coarse-grain tile comparing to the fine tile size. tile_size : int, optional Size of each fine-grain tile in microns (default is 250). chunk_size : int, optional Number of rows to process per chunk for memory efficiency (default is 1000000). verbose : bool, optional Flag to enable verbose output (default is False). image_scale : float, optional Scale factor to apply to the transcript coordinates (default is 0.5). max_workers : int, optional Maximum number of parallel workers for processing tiles (default is 8)."},{"location":"python/api/#celldega.pre.make_trx_tiles--returns","title":"Returns","text":"dict A dictionary containing the bounds of the processed data in both x and y directions. 
Source code in src/celldega/pre/trx_tile.py def make_trx_tiles(\n technology,\n path_trx,\n path_transformation_matrix,\n path_trx_tiles,\n coarse_tile_factor=10,\n tile_size=250,\n chunk_size=1000000,\n verbose=False,\n image_scale=1,\n max_workers=8\n):\n \"\"\"\n Processes transcript data by dividing it into coarse-grain and fine-grain tiles,\n applying transformations, and saving the results in a parallelized manner.\n\n Parameters\n ----------\n technology : str\n The technology used for generating the transcript data (e.g., \"MERSCOPE\" or \"Xenium\").\n path_trx : str\n Path to the file containing the transcript data.\n path_transformation_matrix : str\n Path to the file containing the transformation matrix (CSV file).\n path_trx_tiles : str\n Directory path where the output files (Parquet files) for each tile will be saved.\n coarse_tile_factor : int, optional\n Scaling factor of each coarse-grain tile comparing to the fine tile size.\n tile_size : int, optional\n Size of each fine-grain tile in microns (default is 250).\n chunk_size : int, optional\n Number of rows to process per chunk for memory efficiency (default is 1000000).\n verbose : bool, optional\n Flag to enable verbose output (default is False).\n image_scale : float, optional\n Scale factor to apply to the transcript coordinates (default is 0.5).\n max_workers : int, optional\n Maximum number of parallel workers for processing tiles (default is 8).\n\n Returns\n -------\n dict\n A dictionary containing the bounds of the processed data in both x and y directions.\n \"\"\"\n\n def process_coarse_tile(trx, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers):\n # Filter the entire dataset for the current coarse tile\n coarse_tile = trx.filter(\n (pl.col(\"transformed_x\") >= coarse_tile_x_min) & (pl.col(\"transformed_x\") < coarse_tile_x_max) &\n (pl.col(\"transformed_y\") >= coarse_tile_y_min) & (pl.col(\"transformed_y\") < coarse_tile_y_max)\n )\n\n if not coarse_tile.is_empty():\n # Now process fine tiles using global fine tile indices\n process_fine_tiles(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers) \n\n\n def process_fine_tiles(coarse_tile, coarse_i, coarse_j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers=8):\n\n # Use ThreadPoolExecutor for parallel processing of fine-grain tiles within the coarse tile\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = []\n\n # Iterate over fine-grain tiles within the global bounds\n for fine_i in range(n_fine_tiles_x):\n fine_tile_x_min = x_min + fine_i * tile_size\n fine_tile_x_max = fine_tile_x_min + tile_size\n\n # Process only if the fine tile falls within the current coarse tile's bounds\n if not (fine_tile_x_min >= coarse_tile_x_min and fine_tile_x_max <= coarse_tile_x_max):\n continue\n\n for fine_j in range(n_fine_tiles_y):\n fine_tile_y_min = y_min + fine_j * tile_size\n fine_tile_y_max = fine_tile_y_min + tile_size\n\n # Process only if the fine tile falls within the current coarse tile's bounds\n if not (fine_tile_y_min >= coarse_tile_y_min and fine_tile_y_max <= coarse_tile_y_max):\n continue\n\n # Submit the task for each fine tile to process in parallel\n 
futures.append(executor.submit(\n filter_and_save_fine_tile, coarse_tile, coarse_i, coarse_j, fine_i, fine_j, \n fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_trx_tiles\n ))\n\n # Wait for all futures to complete\n for future in concurrent.futures.as_completed(futures):\n future.result() # Raise exceptions if any occurred during execution\n\n\n def filter_and_save_fine_tile(coarse_tile, coarse_i, coarse_j, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_trx_tiles):\n\n # Filter the coarse tile for the current fine tile's boundaries\n fine_tile_trx = coarse_tile.filter(\n (pl.col(\"transformed_x\") >= fine_tile_x_min) & (pl.col(\"transformed_x\") < fine_tile_x_max) &\n (pl.col(\"transformed_y\") >= fine_tile_y_min) & (pl.col(\"transformed_y\") < fine_tile_y_max)\n )\n\n if not fine_tile_trx.is_empty():\n # Add geometry column as a list of [x, y] pairs\n fine_tile_trx = fine_tile_trx.with_columns(\n pl.concat_list([pl.col(\"transformed_x\"), pl.col(\"transformed_y\")]).alias(\"geometry\")\n ).drop(['transformed_x', 'transformed_y'])\n\n # Define the filename based on fine tile coordinates\n filename = f\"{path_trx_tiles}/transcripts_tile_{fine_i}_{fine_j}.parquet\"\n\n # Save the filtered DataFrame to a Parquet file\n fine_tile_trx.to_pandas().to_parquet(filename)\n\n\n # Load transformation matrix\n transformation_matrix = np.loadtxt(path_transformation_matrix)\n\n # Load the transcript data based on the technology using Polars\n if technology == \"MERSCOPE\":\n trx_ini = pl.read_csv(path_trx, columns=[\"gene\", \"global_x\", \"global_y\"])\n trx_ini = trx_ini.with_columns([\n pl.col(\"global_x\").alias(\"x\"),\n pl.col(\"global_y\").alias(\"y\"),\n pl.col(\"gene\").alias(\"name\")\n ]).select([\"name\", \"x\", \"y\"])\n\n elif technology == \"Xenium\":\n trx_ini = pl.read_parquet(path_trx).select([\n pl.col(\"feature_name\").alias(\"name\"),\n pl.col(\"x_location\").alias(\"x\"),\n pl.col(\"y_location\").alias(\"y\")\n ])\n\n # Process the data in chunks and apply transformations\n all_chunks = []\n\n for start_row in tqdm(range(0, trx_ini.height, chunk_size), desc=\"Processing chunks\"):\n chunk = trx_ini.slice(start_row, chunk_size)\n\n # Apply transformation matrix to the coordinates\n points = np.hstack([chunk.select([\"x\", \"y\"]).to_numpy(), np.ones((chunk.height, 1))])\n transformed_points = np.dot(points, transformation_matrix.T)[:, :2]\n\n # Create new transformed columns and drop original x, y columns\n transformed_chunk = chunk.with_columns([\n (pl.Series(transformed_points[:, 0]) * image_scale).round(2).alias(\"transformed_x\"),\n (pl.Series(transformed_points[:, 1]) * image_scale).round(2).alias(\"transformed_y\")\n ]).drop([\"x\", \"y\"])\n all_chunks.append(transformed_chunk)\n\n # Concatenate all chunks after processing\n trx = pl.concat(all_chunks)\n\n # Ensure the output directory exists\n if not os.path.exists(path_trx_tiles):\n os.makedirs(path_trx_tiles)\n\n # Get min and max x, y values\n x_min, x_max = trx.select([\n pl.col(\"transformed_x\").min().alias(\"x_min\"),\n pl.col(\"transformed_x\").max().alias(\"x_max\")\n ]).row(0)\n\n y_min, y_max = trx.select([\n pl.col(\"transformed_y\").min().alias(\"y_min\"),\n pl.col(\"transformed_y\").max().alias(\"y_max\")\n ]).row(0)\n\n # Calculate the number of fine-grain tiles globally\n n_fine_tiles_x = int(np.ceil((x_max - x_min) / tile_size))\n n_fine_tiles_y = int(np.ceil((y_max - y_min) / tile_size))\n\n # Calculate the number of coarse-grain tiles\n 
n_coarse_tiles_x = int(np.ceil((x_max - x_min) / (coarse_tile_factor * tile_size)))\n n_coarse_tiles_y = int(np.ceil((y_max - y_min) / (coarse_tile_factor * tile_size)))\n\n # Use ThreadPoolExecutor for parallel processing of coarse-grain tiles\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = []\n for i in range(n_coarse_tiles_x):\n coarse_tile_x_min = x_min + i * (coarse_tile_factor * tile_size)\n coarse_tile_x_max = coarse_tile_x_min + (coarse_tile_factor * tile_size)\n\n for j in range(n_coarse_tiles_y):\n coarse_tile_y_min = y_min + j * (coarse_tile_factor * tile_size)\n coarse_tile_y_max = coarse_tile_y_min + (coarse_tile_factor * tile_size)\n\n # Submit each coarse tile for parallel processing\n futures.append(executor.submit(\n process_coarse_tile, trx, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers\n ))\n\n # Wait for all coarse tiles to complete\n for future in tqdm(concurrent.futures.as_completed(futures), desc=\"Processing coarse tiles\", unit=\"tile\"):\n future.result() # Raise exceptions if any occurred during execution\n\n # Return the tile bounds\n tile_bounds = {\n \"x_min\": x_min,\n \"x_max\": x_max,\n \"y_min\": y_min,\n \"y_max\": y_max,\n }\n\n return tile_bounds\n"},{"location":"python/api/#celldega.pre.reduce_image_size","title":"reduce_image_size(image_path, scale_image=0.5, path_landscape_files='')","text":""},{"location":"python/api/#celldega.pre.reduce_image_size--parameters","title":"Parameters","text":"image_path : str Path to the image file scale_image : float (default=0.5) Scale factor for the image resize"},{"location":"python/api/#celldega.pre.reduce_image_size--returns","title":"Returns","text":"new_image_path : str Path to the resized image file Source code in src/celldega/pre/__init__.py def reduce_image_size(image_path, scale_image=0.5, path_landscape_files=\"\"):\n \"\"\"\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n scale_image : float (default=0.5)\n Scale factor for the image resize\n\n Returns\n -------\n new_image_path : str\n Path to the resized image file\n \"\"\"\n\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n resized_image = image.resize(scale_image)\n\n new_image_name = image_path.split(\"/\")[-1].replace(\".tif\", \"_downsize.tif\")\n new_image_path = f\"{path_landscape_files}/{new_image_name}\"\n resized_image.write_to_file(new_image_path)\n\n return new_image_path\n"},{"location":"python/api/#celldega.pre.save_landscape_parameters","title":"save_landscape_parameters(technology, path_landscape_files, image_name='dapi_files', tile_size=1000, image_info={}, image_format='.webp')","text":"Save the landscape parameters to a JSON file. 
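Before the source listing below, here is a minimal usage sketch for save_landscape_parameters. The directory layout and the contents of image_info are illustrative assumptions; only the keyword arguments shown in the signature above are taken from the documentation.

import celldega as dega

# Assumes an image pyramid was already written to landscape_files/pyramid_images/dapi_files
# (the directory names and paths are placeholders).
dega.pre.save_landscape_parameters(
    technology="Xenium",
    path_landscape_files="landscape_files",
    image_name="dapi_files",
    image_info={"dapi": {"button_name": "DAPI", "color": [0, 0, 255]}},  # illustrative structure
    image_format=".webp",
)
# Writes landscape_files/landscape_parameters.json, including the maximum pyramid zoom
# level detected from the pyramid directory.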
Source code in src/celldega/pre/__init__.py def save_landscape_parameters(\n technology, path_landscape_files, image_name=\"dapi_files\", tile_size=1000, image_info={}, image_format='.webp'\n):\n \"\"\"\n Save the landscape parameters to a JSON file.\n \"\"\"\n\n path_image_pyramid = f\"{path_landscape_files}/pyramid_images/{image_name}\"\n\n print(path_image_pyramid)\n\n max_pyramid_zoom = get_max_zoom_level(path_image_pyramid)\n\n landscape_parameters = {\n \"technology\": technology,\n \"max_pyramid_zoom\": max_pyramid_zoom,\n \"tile_size\": tile_size,\n \"image_info\": image_info,\n \"image_format\": image_format\n }\n\n path_landscape_parameters = f\"{path_landscape_files}/landscape_parameters.json\"\n\n with open(path_landscape_parameters, \"w\") as file:\n json.dump(landscape_parameters, file, indent=4)\n"},{"location":"python/api/#celldega.viz.Landscape","title":"Landscape","text":" Bases: AnyWidget A widget for interactive visualization of spatial omics data. This widget currently supports iST (Xenium and MERSCOPE) and sST (Visium HD data) Parameters: Name Type Description Default ini_x float The initial x-coordinate of the view. required ini_y float The initial y-coordinate of the view. required ini_zoom float The initial zoom level of the view. required token str The token traitlet. required base_url str The base URL for the widget. required dataset_name str The name of the dataset to visualize. This will show up in the user interface bar. required Attributes: Name Type Description component str The name of the component. technology str The technology used. base_url str The base URL for the widget. token str The token traitlet. ini_x float The initial x-coordinate of the view. ini_y float The initial y-coordinate of the view. ini_z float The initial z-coordinate of the view. ini_zoom float The initial zoom level of the view. dataset_name str The name of the dataset to visualize. update_trigger dict The dictionary to trigger updates. cell_clusters dict The dictionary containing cell cluster information. Returns: Name Type Description Landscape A widget for visualizing a 'landscape' view of spatial omics data. Source code in src/celldega/viz/widget.py class Landscape(anywidget.AnyWidget):\n \"\"\"\n A widget for interactive visualization of spatial omics data. This widget\n currently supports iST (Xenium and MERSCOPE) and sST (Visium HD data)\n\n Args:\n ini_x (float): The initial x-coordinate of the view.\n ini_y (float): The initial y-coordinate of the view.\n ini_zoom (float): The initial zoom level of the view.\n token (str): The token traitlet.\n base_url (str): The base URL for the widget.\n dataset_name (str, optional): The name of the dataset to visualize. 
This will show up in the user interface bar.\n\n Attributes:\n component (str): The name of the component.\n technology (str): The technology used.\n base_url (str): The base URL for the widget.\n token (str): The token traitlet.\n ini_x (float): The initial x-coordinate of the view.\n ini_y (float): The initial y-coordinate of the view.\n ini_z (float): The initial z-coordinate of the view.\n ini_zoom (float): The initial zoom level of the view.\n dataset_name (str): The name of the dataset to visualize.\n update_trigger (dict): The dictionary to trigger updates.\n cell_clusters (dict): The dictionary containing cell cluster information.\n\n Returns:\n Landscape: A widget for visualizing a 'landscape' view of spatial omics data.\n \"\"\"\n _esm = pathlib.Path(__file__).parent / \"../static\" / \"widget.js\"\n _css = pathlib.Path(__file__).parent / \"../static\" / \"widget.css\"\n component = traitlets.Unicode(\"Landscape\").tag(sync=True)\n\n technology = traitlets.Unicode(\"sst\").tag(sync=True)\n base_url = traitlets.Unicode(\"\").tag(sync=True)\n token = traitlets.Unicode(\"\").tag(sync=True)\n ini_x = traitlets.Float(1000).tag(sync=True)\n ini_y = traitlets.Float(1000).tag(sync=True)\n ini_z = traitlets.Float(0).tag(sync=True)\n ini_zoom = traitlets.Float(0).tag(sync=True)\n square_tile_size = traitlets.Float(1.4).tag(sync=True)\n dataset_name = traitlets.Unicode(\"\").tag(sync=True)\n region = traitlets.Dict({}).tag(sync=True)\n\n update_trigger = traitlets.Dict().tag(sync=True)\n cell_clusters = traitlets.Dict().tag(sync=True)\n\n width = traitlets.Int(0).tag(sync=True)\n height = traitlets.Int(800).tag(sync=True)\n\n def trigger_update(self, new_value):\n # This method updates the update_trigger traitlet with a new value\n # You can pass any information necessary for the update, or just a timestamp\n self.update_trigger = new_value\n\n def update_cell_clusters(self, new_clusters):\n # Convert the new_clusters to a JSON serializable format if necessary\n self.cell_clusters = new_clusters\n"},{"location":"python/api/#celldega.viz.Matrix","title":"Matrix","text":" Bases: AnyWidget A widget for interactive visualization of a hierarchically clustered matrix. Parameters: Name Type Description Default value int The value traitlet. required component str The component traitlet. required network dict The network traitlet. required click_info dict The click_info traitlet. required Attributes: Name Type Description component str The name of the component. network dict The network dictionary. click_info dict The click_info dictionary. Returns: Name Type Description Matrix A widget for visualizing a hierarchically clustered matrix. 
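The sketch below shows how the Landscape widget documented above might be instantiated in a notebook. It mirrors the commented-out call in the Landscape View Xenium example notebook, using a publicly hosted Xenium dataset as base_url and illustrative initial view coordinates.

import celldega as dega

# Publicly hosted Xenium skin-cancer dataset used in the documentation examples.
base_url = (
    "https://raw.githubusercontent.com/broadinstitute/"
    "celldega_Xenium_Prime_Human_Skin_FFPE_outs/main/Xenium_Prime_Human_Skin_FFPE_outs"
)

landscape_ist = dega.viz.Landscape(
    technology="Xenium",
    ini_zoom=-4.5,   # initial zoom level of the view
    ini_x=6000,      # initial x-coordinate of the view
    ini_y=8000,      # initial y-coordinate of the view
    base_url=base_url,
)

landscape_ist  # rendering the widget as the last expression in a cell displays it

The Matrix widget described next is instantiated the same way, passing a Clustergrammer-style network dictionary via its network argument.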
Source code in src/celldega/viz/widget.py class Matrix(anywidget.AnyWidget):\n \"\"\"\n A widget for interactive visualization of a hierarchically clustered matrix.\n\n Args:\n value (int): The value traitlet.\n component (str): The component traitlet.\n network (dict): The network traitlet.\n click_info (dict): The click_info traitlet.\n\n Attributes:\n component (str): The name of the component.\n network (dict): The network dictionary.\n click_info (dict): The click_info dictionary.\n\n Returns:\n Matrix: A widget for visualizing a hierarchically clustered matrix.\n \"\"\"\n _esm = pathlib.Path(__file__).parent / \"../static\" / \"widget.js\"\n _css = pathlib.Path(__file__).parent / \"../static\" / \"widget.css\"\n value = traitlets.Int(0).tag(sync=True)\n component = traitlets.Unicode(\"Matrix\").tag(sync=True)\n\n network = traitlets.Dict({}).tag(sync=True)\n click_info = traitlets.Dict({}).tag(sync=True)\n"},{"location":"python/pre/api/","title":"Pre Module API Reference","text":"Module for pre-processing to generate LandscapeFiles from ST data."},{"location":"python/pre/api/#celldega.pre.convert_long_id_to_short","title":"convert_long_id_to_short(df)","text":"Converts a column of long integer cell IDs in a DataFrame to a shorter, hash-based representation. Parameters: Name Type Description Default df DataFrame The DataFrame containing the EntityID. required Returns: pd.DataFrame: The original DataFrame with an additional column named cell_id containing the shortened cell IDs. The function applies a SHA-256 hash to each cell ID, encodes the hash using base64, and truncates it to create a shorter identifier that is added as a new column to the DataFrame. Source code in src/celldega/pre/__init__.py def convert_long_id_to_short(df):\n \"\"\"\n Converts a column of long integer cell IDs in a DataFrame to a shorter, hash-based representation.\n\n Args:\n df (pd.DataFrame): The DataFrame containing the EntityID.\n Returns:\n pd.DataFrame: The original DataFrame with an additional column named `cell_id`\n containing the shortened cell IDs.\n\n The function applies a SHA-256 hash to each cell ID, encodes the hash using base64, and truncates\n it to create a shorter identifier that is added as a new column to the DataFrame.\n \"\"\"\n # Function to hash and encode the cell ID\n def hash_and_shorten_id(cell_id):\n # Create a hash of the cell ID\n cell_id_bytes = str(cell_id).encode('utf-8')\n hash_object = hashlib.sha256(cell_id_bytes)\n hash_digest = hash_object.digest()\n\n # Encode the hash to a base64 string to mix letters and numbers, truncate to 9 characters\n short_id = base64.urlsafe_b64encode(hash_digest).decode('utf-8')[:9]\n return short_id\n\n # Apply the hash_and_shorten_id function to each cell ID in the specified column\n df['cell_id'] = df['EntityID'].apply(hash_and_shorten_id)\n\n return df\n"},{"location":"python/pre/api/#celldega.pre.convert_to_jpeg","title":"convert_to_jpeg(image_path, quality=80)","text":"Convert a TIFF image to a JPEG image with a quality of score"},{"location":"python/pre/api/#celldega.pre.convert_to_jpeg--parameters","title":"Parameters","text":"image_path : str Path to the image file quality : int (default=80) Quality score for the JPEG image"},{"location":"python/pre/api/#celldega.pre.convert_to_jpeg--returns","title":"Returns","text":"new_image_path : str Path to the JPEG image file Source code in src/celldega/pre/__init__.py def convert_to_jpeg(image_path, quality=80):\n \"\"\"\n Convert a TIFF image to a JPEG image with a quality of score\n\n 
Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=80)\n Quality score for the JPEG image\n\n Returns\n -------\n new_image_path : str\n Path to the JPEG image file\n\n \"\"\"\n\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a JPEG with a quality of 80\n new_image_path = image_path.replace(\".tif\", \".jpeg\")\n image.jpegsave(new_image_path, Q=quality)\n\n return new_image_path\n"},{"location":"python/pre/api/#celldega.pre.convert_to_png","title":"convert_to_png(image_path)","text":"Convert a TIFF image to a JPEG image with a quality of score"},{"location":"python/pre/api/#celldega.pre.convert_to_png--parameters","title":"Parameters","text":"image_path : str Path to the image file quality : int (default=80) Quality score for the JPEG image"},{"location":"python/pre/api/#celldega.pre.convert_to_png--returns","title":"Returns","text":"new_image_path : str Path to the JPEG image file Source code in src/celldega/pre/__init__.py def convert_to_png(image_path):\n \"\"\"\n Convert a TIFF image to a JPEG image with a quality of score\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=80)\n Quality score for the JPEG image\n\n Returns\n -------\n new_image_path : str\n Path to the JPEG image file\n\n \"\"\"\n\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a JPEG with a quality of 80\n new_image_path = image_path.replace(\".tif\", \".png\")\n image.pngsave(new_image_path)\n\n return new_image_path\n"},{"location":"python/pre/api/#celldega.pre.convert_to_webp","title":"convert_to_webp(image_path, quality=100)","text":"Convert a TIFF image to a WEBP image with a specified quality score."},{"location":"python/pre/api/#celldega.pre.convert_to_webp--parameters","title":"Parameters","text":"image_path : str Path to the image file quality : int (default=100) Quality score for the WEBP image (higher is better quality)"},{"location":"python/pre/api/#celldega.pre.convert_to_webp--returns","title":"Returns","text":"new_image_path : str Path to the WEBP image file Source code in src/celldega/pre/__init__.py def convert_to_webp(image_path, quality=100):\n \"\"\"\n Convert a TIFF image to a WEBP image with a specified quality score.\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=100)\n Quality score for the WEBP image (higher is better quality)\n\n Returns\n -------\n new_image_path : str\n Path to the WEBP image file\n \"\"\"\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a WEBP with specified quality\n new_image_path = image_path.replace(\".tif\", \".webp\")\n image.webpsave(new_image_path, Q=quality)\n\n return new_image_path\n"},{"location":"python/pre/api/#celldega.pre.get_max_zoom_level","title":"get_max_zoom_level(path_image_pyramid)","text":"Returns the maximum zoom level based on the highest-numbered directory in the specified path_image_pyramid. Parameters: Name Type Description Default path_image_pyramid str The path to the directory containing zoom level directories. required Returns: Name Type Description max_pyramid_zoom int The maximum zoom level. 
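A short sketch of the TIFF conversion helpers documented above; the input path is a placeholder for a full-resolution DAPI image.

import celldega as dega

# Hypothetical path to a full-resolution DAPI TIFF exported from the instrument.
path_tif = "landscape_files/dapi_image.tif"

# Each helper writes the converted image next to the input file and returns the new path.
path_webp = dega.pre.convert_to_webp(path_tif, quality=100)
path_jpeg = dega.pre.convert_to_jpeg(path_tif, quality=80)
path_png = dega.pre.convert_to_png(path_tif)

# Optionally downsample the image first (scale factor 0.5 by default).
path_small = dega.pre.reduce_image_size(path_tif, scale_image=0.5, path_landscape_files="landscape_files")

print(path_webp, path_jpeg, path_png, path_small)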
Source code in src/celldega/pre/__init__.py def get_max_zoom_level(path_image_pyramid):\n \"\"\"\n Returns the maximum zoom level based on the highest-numbered directory\n in the specified path_image_pyramid.\n\n Parameters:\n path_image_pyramid (str): The path to the directory containing zoom level directories.\n\n Returns:\n max_pyramid_zoom (int): The maximum zoom level.\n \"\"\"\n # List all entries in the path_image_pyramid that are directories and can be converted to integers\n zoom_levels = [\n entry\n for entry in os.listdir(path_image_pyramid)\n if os.path.isdir(os.path.join(path_image_pyramid, entry)) and entry.isdigit()\n ]\n\n # Convert to integer and find the maximum value\n max_pyramid_zoom = max(map(int, zoom_levels)) if zoom_levels else None\n\n return max_pyramid_zoom\n"},{"location":"python/pre/api/#celldega.pre.make_cell_boundary_tiles","title":"make_cell_boundary_tiles(technology, path_cell_boundaries, path_meta_cell_micron, path_transformation_matrix, path_output, coarse_tile_factor=20, tile_size=250, tile_bounds=None, image_scale=1, max_workers=8)","text":"Processes cell boundary data and divides it into spatial tiles based on the provided technology. Reads cell boundary data, applies affine transformations, and divides the data into coarse and fine tiles. The resulting tiles are saved as Parquet files, each containing the geometries of cells in that tile."},{"location":"python/pre/api/#celldega.pre.make_cell_boundary_tiles--parameters","title":"Parameters","text":"technology : str The technology used to generate the cell boundary data, e.g., \"MERSCOPE\", \"Xenium\", or \"custom\". path_cell_boundaries : str Path to the file containing the cell boundaries (Parquet format). path_meta_cell_micron : str Path to the file containing cell metadata (CSV format). path_transformation_matrix : str Path to the file containing the transformation matrix (CSV format). path_output : str Directory path where the output files (Parquet files) for each tile will be saved. coarse_tile_factor : int, optional, default=20. scaling factor of each coarse-grain tile comparing to the fine tile size. tile_size : int, optional, default=500 Size of each fine-grain tile in microns. tile_bounds : dict, optional Dictionary containing the minimum and maximum bounds for x and y coordinates. image_scale : float, optional, default=1 Scale factor to apply to the geometry data. 
max_workers : int, optional, default=8 Maximum number of parallel workers for processing tiles."},{"location":"python/pre/api/#celldega.pre.make_cell_boundary_tiles--returns","title":"Returns","text":"None Source code in src/celldega/pre/boundary_tile.py def make_cell_boundary_tiles(\n technology,\n path_cell_boundaries,\n path_meta_cell_micron,\n path_transformation_matrix,\n path_output,\n coarse_tile_factor=20,\n tile_size=250,\n tile_bounds=None,\n image_scale=1,\n max_workers=8\n):\n\n\n \"\"\"\n Processes cell boundary data and divides it into spatial tiles based on the provided technology.\n Reads cell boundary data, applies affine transformations, and divides the data into coarse and fine tiles.\n The resulting tiles are saved as Parquet files, each containing the geometries of cells in that tile.\n\n Parameters\n ----------\n technology : str\n The technology used to generate the cell boundary data, e.g., \"MERSCOPE\", \"Xenium\", or \"custom\".\n path_cell_boundaries : str\n Path to the file containing the cell boundaries (Parquet format).\n path_meta_cell_micron : str\n Path to the file containing cell metadata (CSV format).\n path_transformation_matrix : str\n Path to the file containing the transformation matrix (CSV format).\n path_output : str\n Directory path where the output files (Parquet files) for each tile will be saved.\n coarse_tile_factor : int, optional, default=20.\n scaling factor of each coarse-grain tile comparing to the fine tile size.\n tile_size : int, optional, default=500\n Size of each fine-grain tile in microns.\n tile_bounds : dict, optional\n Dictionary containing the minimum and maximum bounds for x and y coordinates.\n image_scale : float, optional, default=1\n Scale factor to apply to the geometry data.\n max_workers : int, optional, default=8\n Maximum number of parallel workers for processing tiles.\n\n Returns\n -------\n None\n \"\"\"\n\n def numpy_affine_transform(coords, matrix):\n \"\"\"Apply affine transformation to numpy coordinates.\"\"\"\n # Homogeneous coordinates for affine transformation\n coords = np.hstack([coords, np.ones((coords.shape[0], 1))])\n transformed_coords = coords @ matrix.T\n return transformed_coords[:, :2] # Drop the homogeneous coordinate\n\n def batch_transform_geometries(geometries, transformation_matrix, scale):\n \"\"\"\n Batch transform geometries using numpy for optimized performance.\n \"\"\"\n # Extract affine transformation parameters into a 3x3 matrix for numpy\n affine_matrix = np.array([\n [transformation_matrix[0, 0], transformation_matrix[0, 1], transformation_matrix[0, 2]],\n [transformation_matrix[1, 0], transformation_matrix[1, 1], transformation_matrix[1, 2]],\n [0, 0, 1]\n ])\n\n transformed_geometries = []\n\n for polygon in geometries:\n # Extract coordinates and transform them\n if isinstance(polygon, MultiPolygon):\n polygon = next(polygon.geoms) # Use the first geometry\n\n # Transform the exterior of the polygon\n exterior_coords = np.array(polygon.exterior.coords)\n\n # Apply the affine transformation and scale\n transformed_coords = numpy_affine_transform(exterior_coords, affine_matrix) / scale\n\n # Append the result to the transformed_geometries list\n transformed_geometries.append([transformed_coords.tolist()])\n\n return transformed_geometries\n\n\n def filter_and_save_fine_boundary(coarse_tile, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_output):\n cell_ids = coarse_tile.index.values\n\n tile_filter = (\n (coarse_tile[\"center_x\"] >= 
fine_tile_x_min) & (coarse_tile[\"center_x\"] < fine_tile_x_max) &\n (coarse_tile[\"center_y\"] >= fine_tile_y_min) & (coarse_tile[\"center_y\"] < fine_tile_y_max)\n )\n filtered_indices = np.where(tile_filter)[0]\n\n keep_cells = cell_ids[filtered_indices]\n fine_tile_cells = coarse_tile.loc[keep_cells, [\"GEOMETRY\"]]\n fine_tile_cells = fine_tile_cells.assign(name=fine_tile_cells.index)\n\n if not fine_tile_cells.empty:\n filename = f\"{path_output}/cell_tile_{fine_i}_{fine_j}.parquet\"\n fine_tile_cells.to_parquet(filename)\n\n def process_fine_boundaries(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_output, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y):\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = []\n for fine_i in range(n_fine_tiles_x):\n fine_tile_x_min = x_min + fine_i * tile_size\n fine_tile_x_max = fine_tile_x_min + tile_size\n\n if not (fine_tile_x_min >= coarse_tile_x_min and fine_tile_x_max <= coarse_tile_x_max):\n continue\n\n for fine_j in range(n_fine_tiles_y):\n fine_tile_y_min = y_min + fine_j * tile_size\n fine_tile_y_max = fine_tile_y_min + tile_size\n\n if not (fine_tile_y_min >= coarse_tile_y_min and fine_tile_y_max <= coarse_tile_y_max):\n continue\n\n futures.append(executor.submit(\n filter_and_save_fine_boundary, coarse_tile, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_output\n ))\n\n for future in futures:\n future.result()\n\n tile_size_x = tile_size\n tile_size_y = tile_size\n\n transformation_matrix = pd.read_csv(path_transformation_matrix, header=None, sep=\" \").values\n\n # Load cell boundary data based on the technology\n if technology == \"MERSCOPE\":\n df_meta = pd.read_parquet(f\"{path_output.replace('cell_segmentation','cell_metadata.parquet')}\")\n entity_to_cell_id_dict = pd.Series(df_meta.index.values, index=df_meta.EntityID).to_dict()\n cells_orig = gpd.read_parquet(path_cell_boundaries)\n cells_orig['cell_id'] = cells_orig['EntityID'].map(entity_to_cell_id_dict)\n cells_orig = cells_orig[cells_orig[\"ZIndex\"] == 1]\n\n # Correct cell_id issues with meta_cell\n meta_cell = pd.read_csv(path_meta_cell_micron)\n meta_cell['cell_id'] = meta_cell['EntityID'].map(entity_to_cell_id_dict)\n cells_orig.index = meta_cell[meta_cell[\"cell_id\"].isin(cells_orig['cell_id'])].index\n\n # Correct 'MultiPolygon' to 'Polygon'\n cells_orig[\"geometry\"] = cells_orig[\"Geometry\"].apply(\n lambda x: list(x.geoms)[0] if isinstance(x, MultiPolygon) else x\n )\n\n cells_orig.set_index('cell_id', inplace=True)\n\n elif technology == \"Xenium\":\n xenium_cells = pd.read_parquet(path_cell_boundaries)\n grouped = xenium_cells.groupby(\"cell_id\")[[\"vertex_x\", \"vertex_y\"]].agg(lambda x: x.tolist())\n grouped[\"geometry\"] = grouped.apply(lambda row: Polygon(zip(row[\"vertex_x\"], row[\"vertex_y\"])), axis=1)\n cells_orig = gpd.GeoDataFrame(grouped, geometry=\"geometry\")[[\"geometry\"]]\n\n elif technology == \"custom\":\n cells_orig = gpd.read_parquet(path_cell_boundaries)\n\n # Transform geometries\n cells_orig[\"GEOMETRY\"] = batch_transform_geometries(cells_orig[\"geometry\"], transformation_matrix, image_scale)\n\n # Convert transformed geometries to polygons and calculate centroids\n cells_orig[\"polygon\"] = cells_orig[\"GEOMETRY\"].apply(lambda x: Polygon(x[0]))\n gdf_cells = gpd.GeoDataFrame(geometry=cells_orig[\"polygon\"])\n gdf_cells[\"center_x\"] = gdf_cells.geometry.centroid.x\n 
gdf_cells[\"center_y\"] = gdf_cells.geometry.centroid.y\n gdf_cells[\"GEOMETRY\"] = cells_orig[\"GEOMETRY\"]\n\n # Ensure the output directory exists\n if not os.path.exists(path_output):\n os.makedirs(path_output)\n\n # Calculate tile bounds and fine/coarse tiles\n x_min, x_max = tile_bounds[\"x_min\"], tile_bounds[\"x_max\"]\n y_min, y_max = tile_bounds[\"y_min\"], tile_bounds[\"y_max\"]\n n_fine_tiles_x = int(np.ceil((x_max - x_min) / tile_size))\n n_fine_tiles_y = int(np.ceil((y_max - y_min) / tile_size))\n n_coarse_tiles_x = int(np.ceil((x_max - x_min) / (coarse_tile_factor * tile_size)))\n n_coarse_tiles_y = int(np.ceil((y_max - y_min) / (coarse_tile_factor * tile_size)))\n\n # Process coarse tiles in parallel\n for i in tqdm(range(n_coarse_tiles_x), desc=\"Processing coarse tiles\"):\n coarse_tile_x_min = x_min + i * (coarse_tile_factor * tile_size)\n coarse_tile_x_max = coarse_tile_x_min + (coarse_tile_factor * tile_size)\n\n for j in range(n_coarse_tiles_y):\n coarse_tile_y_min = y_min + j * (coarse_tile_factor * tile_size)\n coarse_tile_y_max = coarse_tile_y_min + (coarse_tile_factor * tile_size)\n\n coarse_tile = gdf_cells[\n (gdf_cells[\"center_x\"] >= coarse_tile_x_min) & (gdf_cells[\"center_x\"] < coarse_tile_x_max) &\n (gdf_cells[\"center_y\"] >= coarse_tile_y_min) & (gdf_cells[\"center_y\"] < coarse_tile_y_max)\n ]\n if not coarse_tile.empty:\n process_fine_boundaries(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_output, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y)\n"},{"location":"python/pre/api/#celldega.pre.make_deepzoom_pyramid","title":"make_deepzoom_pyramid(image_path, output_path, pyramid_name, tile_size=512, overlap=0, suffix='.jpeg')","text":"Create a DeepZoom image pyramid from a JPEG image"},{"location":"python/pre/api/#celldega.pre.make_deepzoom_pyramid--parameters","title":"Parameters","text":"image_path : str Path to the JPEG image file tile_size : int (default=512) Tile size for the DeepZoom pyramid overlap : int (default=0) Overlap size for the DeepZoom pyramid suffix : str (default='jpeg') Suffix for the DeepZoom pyramid tiles"},{"location":"python/pre/api/#celldega.pre.make_deepzoom_pyramid--returns","title":"Returns","text":"None Source code in src/celldega/pre/__init__.py def make_deepzoom_pyramid(\n image_path, output_path, pyramid_name, tile_size=512, overlap=0, suffix=\".jpeg\"\n):\n \"\"\"\n Create a DeepZoom image pyramid from a JPEG image\n\n Parameters\n ----------\n image_path : str\n Path to the JPEG image file\n tile_size : int (default=512)\n Tile size for the DeepZoom pyramid\n overlap : int (default=0)\n Overlap size for the DeepZoom pyramid\n suffix : str (default='jpeg')\n Suffix for the DeepZoom pyramid tiles\n\n Returns\n -------\n None\n\n \"\"\"\n\n # Define the output path\n output_path = Path(output_path)\n\n # Load the JPEG image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # check if the output path exists and create it if it does not\n output_path.mkdir(parents=True, exist_ok=True)\n\n # append the pyramid name to the output path\n output_path = output_path / pyramid_name\n\n # Save the image as a DeepZoom image pyramid\n image.dzsave(output_path, tile_size=tile_size, overlap=overlap, suffix=suffix)\n"},{"location":"python/pre/api/#celldega.pre.make_meta_cell_image_coord","title":"make_meta_cell_image_coord(technology, path_transformation_matrix, path_meta_cell_micron, path_meta_cell_image, image_scale)","text":"Apply an affine 
transformation to the cell coordinates in microns and save the transformed coordinates in pixels"},{"location":"python/pre/api/#celldega.pre.make_meta_cell_image_coord--parameters","title":"Parameters","text":"technology : str The technology used to generate the data, Xenium and MERSCOPE are supported. path_transformation_matrix : str Path to the transformation matrix file path_meta_cell_micron : str Path to the meta cell file with coordinates in microns path_meta_cell_image : str Path to save the meta cell file with coordinates in pixels"},{"location":"python/pre/api/#celldega.pre.make_meta_cell_image_coord--returns","title":"Returns","text":"None"},{"location":"python/pre/api/#celldega.pre.make_meta_cell_image_coord--examples","title":"Examples","text":"make_meta_cell_image_coord( ... technology='Xenium', ... path_transformation_matrix='data/transformation_matrix.txt', ... path_meta_cell_micron='data/meta_cell_micron.csv', ... path_meta_cell_image='data/meta_cell_image.parquet' ... ) Source code in src/celldega/pre/__init__.py def make_meta_cell_image_coord(\n technology,\n path_transformation_matrix,\n path_meta_cell_micron,\n path_meta_cell_image,\n image_scale\n):\n \"\"\"\n Apply an affine transformation to the cell coordinates in microns and save\n the transformed coordinates in pixels\n\n Parameters\n ----------\n technology : str\n The technology used to generate the data, Xenium and MERSCOPE are supported.\n path_transformation_matrix : str\n Path to the transformation matrix file\n path_meta_cell_micron : str\n Path to the meta cell file with coordinates in microns\n path_meta_cell_image : str\n Path to save the meta cell file with coordinates in pixels\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> make_meta_cell_image_coord(\n ... technology='Xenium',\n ... path_transformation_matrix='data/transformation_matrix.txt',\n ... path_meta_cell_micron='data/meta_cell_micron.csv',\n ... path_meta_cell_image='data/meta_cell_image.parquet'\n ... 
)\n\n \"\"\"\n\n transformation_matrix = pd.read_csv(\n path_transformation_matrix, header=None, sep=\" \"\n ).values\n\n if technology == \"MERSCOPE\":\n meta_cell = pd.read_csv(path_meta_cell_micron, usecols=[\"EntityID\", \"center_x\", \"center_y\"])\n meta_cell = convert_long_id_to_short(meta_cell)\n meta_cell[\"name\"] = meta_cell[\"cell_id\"]\n meta_cell = meta_cell.set_index('cell_id')\n elif technology == \"Xenium\":\n usecols = [\"cell_id\", \"x_centroid\", \"y_centroid\"]\n meta_cell = pd.read_csv(path_meta_cell_micron, index_col=0, usecols=usecols)\n meta_cell.columns = [\"center_x\", \"center_y\"]\n meta_cell[\"name\"] = pd.Series(meta_cell.index, index=meta_cell.index)\n\n # Adding a ones column to accommodate for affine transformation\n meta_cell[\"ones\"] = 1\n\n # Preparing the data for matrix multiplication\n points = meta_cell[[\"center_x\", \"center_y\", \"ones\"]].values\n\n # Applying the transformation matrix\n transformed_points = np.dot(transformation_matrix, points.T).T\n\n # Updating the DataFrame with transformed coordinates\n meta_cell[\"center_x\"] = transformed_points[:, 0]\n meta_cell[\"center_y\"] = transformed_points[:, 1]\n\n # Dropping the ones column as it's no longer needed\n meta_cell.drop(columns=[\"ones\"], inplace=True)\n\n meta_cell[\"center_x\"] = meta_cell[\"center_x\"] / image_scale\n meta_cell[\"center_y\"] = meta_cell[\"center_y\"] / image_scale\n\n meta_cell[\"geometry\"] = meta_cell.apply(\n lambda row: [row[\"center_x\"], row[\"center_y\"]], axis=1\n )\n\n if technology == \"MERSCOPE\":\n meta_cell = meta_cell[[\"name\", \"geometry\", \"EntityID\"]]\n else:\n meta_cell = meta_cell[[\"name\", \"geometry\"]]\n\n\n meta_cell.to_parquet(path_meta_cell_image)\n"},{"location":"python/pre/api/#celldega.pre.make_meta_gene","title":"make_meta_gene(technology, path_cbg, path_output)","text":"Create a DataFrame with genes and their assigned colors"},{"location":"python/pre/api/#celldega.pre.make_meta_gene--parameters","title":"Parameters","text":"technology : str The technology used to generate the data, Xenium and MERSCOPE are supported. path_cbg : str Path to the cell-by-gene matrix data (the data format can vary based on technology) path_output : str Path to save the meta gene file"},{"location":"python/pre/api/#celldega.pre.make_meta_gene--returns","title":"Returns","text":"None"},{"location":"python/pre/api/#celldega.pre.make_meta_gene--examples","title":"Examples","text":"make_meta_gene( ... technology='Xenium', ... path_cbg='data/', ... path_output='data/meta_gene.parquet' ... ) Source code in src/celldega/pre/__init__.py def make_meta_gene(technology, path_cbg, path_output):\n \"\"\"\n Create a DataFrame with genes and their assigned colors\n\n Parameters\n ----------\n technology : str\n The technology used to generate the data, Xenium and MERSCOPE are supported.\n path_cbg : str\n Path to the cell-by-gene matrix data (the data format can vary based on technology)\n path_output : str\n Path to save the meta gene file\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> make_meta_gene(\n ... technology='Xenium',\n ... path_cbg='data/',\n ... path_output='data/meta_gene.parquet'\n ... 
)\n \"\"\"\n\n if technology == \"MERSCOPE\":\n cbg = pd.read_csv(path_cbg, index_col=0)\n genes = cbg.columns.tolist()\n elif technology == \"Xenium\":\n # genes = pd.read_csv(path_cbg + 'features.tsv.gz', sep='\\t', header=None)[1].values.tolist()\n cbg = read_cbg_mtx(path_cbg)\n genes = cbg.columns.tolist()\n\n # Get all categorical color palettes from Matplotlib and flatten them into a single list of colors\n palettes = [plt.get_cmap(name).colors for name in plt.colormaps() if \"tab\" in name]\n flat_colors = [color for palette in palettes for color in palette]\n\n # Convert RGB tuples to hex codes\n flat_colors_hex = [to_hex(color) for color in flat_colors]\n\n # Use modular arithmetic to assign a color to each gene, white for genes with \"Blank\"\n colors = [\n flat_colors_hex[i % len(flat_colors_hex)] if \"Blank\" not in gene else \"#FFFFFF\"\n for i, gene in enumerate(genes)\n ]\n\n # Create a DataFrame with genes and their assigned colors\n ser_color = pd.Series(colors, index=genes)\n\n # calculate gene expression metadata\n meta_gene = calc_meta_gene_data(cbg)\n meta_gene['color'] = ser_color\n\n # Identify sparse columns\n sparse_cols = [col for col in meta_gene.columns if pd.api.types.is_sparse(meta_gene[col])]\n\n # Convert sparse columns to dense\n for col in sparse_cols:\n meta_gene[col] = meta_gene[col].sparse.to_dense()\n\n meta_gene.to_parquet(path_output)\n"},{"location":"python/pre/api/#celldega.pre.make_trx_tiles","title":"make_trx_tiles(technology, path_trx, path_transformation_matrix, path_trx_tiles, coarse_tile_factor=10, tile_size=250, chunk_size=1000000, verbose=False, image_scale=1, max_workers=8)","text":"Processes transcript data by dividing it into coarse-grain and fine-grain tiles, applying transformations, and saving the results in a parallelized manner."},{"location":"python/pre/api/#celldega.pre.make_trx_tiles--parameters","title":"Parameters","text":"technology : str The technology used for generating the transcript data (e.g., \"MERSCOPE\" or \"Xenium\"). path_trx : str Path to the file containing the transcript data. path_transformation_matrix : str Path to the file containing the transformation matrix (CSV file). path_trx_tiles : str Directory path where the output files (Parquet files) for each tile will be saved. coarse_tile_factor : int, optional Scaling factor of each coarse-grain tile comparing to the fine tile size. tile_size : int, optional Size of each fine-grain tile in microns (default is 250). chunk_size : int, optional Number of rows to process per chunk for memory efficiency (default is 1000000). verbose : bool, optional Flag to enable verbose output (default is False). image_scale : float, optional Scale factor to apply to the transcript coordinates (default is 0.5). max_workers : int, optional Maximum number of parallel workers for processing tiles (default is 8)."},{"location":"python/pre/api/#celldega.pre.make_trx_tiles--returns","title":"Returns","text":"dict A dictionary containing the bounds of the processed data in both x and y directions. 
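Before the source listing, a usage sketch of make_trx_tiles and of how its returned bounds might be passed on to make_cell_boundary_tiles so both layers share the same tile grid; all file paths are placeholders for a Xenium run.

import celldega as dega

path_transform = "data/micron_to_image_transform.csv"  # placeholder transformation matrix

# Tile the transcripts and capture the global bounds of the transformed coordinates.
tile_bounds = dega.pre.make_trx_tiles(
    technology="Xenium",
    path_trx="data/transcripts.parquet",
    path_transformation_matrix=path_transform,
    path_trx_tiles="landscape_files/transcript_tiles",
    tile_size=250,
)

# Reusing the same bounds keeps the cell-boundary tiles aligned with the transcript tiles.
dega.pre.make_cell_boundary_tiles(
    technology="Xenium",
    path_cell_boundaries="data/cell_boundaries.parquet",
    path_meta_cell_micron="data/cells.csv",
    path_transformation_matrix=path_transform,
    path_output="landscape_files/cell_segmentation",
    tile_size=250,
    tile_bounds=tile_bounds,
)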
Source code in src/celldega/pre/trx_tile.py def make_trx_tiles(\n technology,\n path_trx,\n path_transformation_matrix,\n path_trx_tiles,\n coarse_tile_factor=10,\n tile_size=250,\n chunk_size=1000000,\n verbose=False,\n image_scale=1,\n max_workers=8\n):\n \"\"\"\n Processes transcript data by dividing it into coarse-grain and fine-grain tiles,\n applying transformations, and saving the results in a parallelized manner.\n\n Parameters\n ----------\n technology : str\n The technology used for generating the transcript data (e.g., \"MERSCOPE\" or \"Xenium\").\n path_trx : str\n Path to the file containing the transcript data.\n path_transformation_matrix : str\n Path to the file containing the transformation matrix (CSV file).\n path_trx_tiles : str\n Directory path where the output files (Parquet files) for each tile will be saved.\n coarse_tile_factor : int, optional\n Scaling factor of each coarse-grain tile comparing to the fine tile size.\n tile_size : int, optional\n Size of each fine-grain tile in microns (default is 250).\n chunk_size : int, optional\n Number of rows to process per chunk for memory efficiency (default is 1000000).\n verbose : bool, optional\n Flag to enable verbose output (default is False).\n image_scale : float, optional\n Scale factor to apply to the transcript coordinates (default is 0.5).\n max_workers : int, optional\n Maximum number of parallel workers for processing tiles (default is 8).\n\n Returns\n -------\n dict\n A dictionary containing the bounds of the processed data in both x and y directions.\n \"\"\"\n\n def process_coarse_tile(trx, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers):\n # Filter the entire dataset for the current coarse tile\n coarse_tile = trx.filter(\n (pl.col(\"transformed_x\") >= coarse_tile_x_min) & (pl.col(\"transformed_x\") < coarse_tile_x_max) &\n (pl.col(\"transformed_y\") >= coarse_tile_y_min) & (pl.col(\"transformed_y\") < coarse_tile_y_max)\n )\n\n if not coarse_tile.is_empty():\n # Now process fine tiles using global fine tile indices\n process_fine_tiles(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers) \n\n\n def process_fine_tiles(coarse_tile, coarse_i, coarse_j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers=8):\n\n # Use ThreadPoolExecutor for parallel processing of fine-grain tiles within the coarse tile\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = []\n\n # Iterate over fine-grain tiles within the global bounds\n for fine_i in range(n_fine_tiles_x):\n fine_tile_x_min = x_min + fine_i * tile_size\n fine_tile_x_max = fine_tile_x_min + tile_size\n\n # Process only if the fine tile falls within the current coarse tile's bounds\n if not (fine_tile_x_min >= coarse_tile_x_min and fine_tile_x_max <= coarse_tile_x_max):\n continue\n\n for fine_j in range(n_fine_tiles_y):\n fine_tile_y_min = y_min + fine_j * tile_size\n fine_tile_y_max = fine_tile_y_min + tile_size\n\n # Process only if the fine tile falls within the current coarse tile's bounds\n if not (fine_tile_y_min >= coarse_tile_y_min and fine_tile_y_max <= coarse_tile_y_max):\n continue\n\n # Submit the task for each fine tile to process in parallel\n 
futures.append(executor.submit(\n filter_and_save_fine_tile, coarse_tile, coarse_i, coarse_j, fine_i, fine_j, \n fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_trx_tiles\n ))\n\n # Wait for all futures to complete\n for future in concurrent.futures.as_completed(futures):\n future.result() # Raise exceptions if any occurred during execution\n\n\n def filter_and_save_fine_tile(coarse_tile, coarse_i, coarse_j, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_trx_tiles):\n\n # Filter the coarse tile for the current fine tile's boundaries\n fine_tile_trx = coarse_tile.filter(\n (pl.col(\"transformed_x\") >= fine_tile_x_min) & (pl.col(\"transformed_x\") < fine_tile_x_max) &\n (pl.col(\"transformed_y\") >= fine_tile_y_min) & (pl.col(\"transformed_y\") < fine_tile_y_max)\n )\n\n if not fine_tile_trx.is_empty():\n # Add geometry column as a list of [x, y] pairs\n fine_tile_trx = fine_tile_trx.with_columns(\n pl.concat_list([pl.col(\"transformed_x\"), pl.col(\"transformed_y\")]).alias(\"geometry\")\n ).drop(['transformed_x', 'transformed_y'])\n\n # Define the filename based on fine tile coordinates\n filename = f\"{path_trx_tiles}/transcripts_tile_{fine_i}_{fine_j}.parquet\"\n\n # Save the filtered DataFrame to a Parquet file\n fine_tile_trx.to_pandas().to_parquet(filename)\n\n\n # Load transformation matrix\n transformation_matrix = np.loadtxt(path_transformation_matrix)\n\n # Load the transcript data based on the technology using Polars\n if technology == \"MERSCOPE\":\n trx_ini = pl.read_csv(path_trx, columns=[\"gene\", \"global_x\", \"global_y\"])\n trx_ini = trx_ini.with_columns([\n pl.col(\"global_x\").alias(\"x\"),\n pl.col(\"global_y\").alias(\"y\"),\n pl.col(\"gene\").alias(\"name\")\n ]).select([\"name\", \"x\", \"y\"])\n\n elif technology == \"Xenium\":\n trx_ini = pl.read_parquet(path_trx).select([\n pl.col(\"feature_name\").alias(\"name\"),\n pl.col(\"x_location\").alias(\"x\"),\n pl.col(\"y_location\").alias(\"y\")\n ])\n\n # Process the data in chunks and apply transformations\n all_chunks = []\n\n for start_row in tqdm(range(0, trx_ini.height, chunk_size), desc=\"Processing chunks\"):\n chunk = trx_ini.slice(start_row, chunk_size)\n\n # Apply transformation matrix to the coordinates\n points = np.hstack([chunk.select([\"x\", \"y\"]).to_numpy(), np.ones((chunk.height, 1))])\n transformed_points = np.dot(points, transformation_matrix.T)[:, :2]\n\n # Create new transformed columns and drop original x, y columns\n transformed_chunk = chunk.with_columns([\n (pl.Series(transformed_points[:, 0]) * image_scale).round(2).alias(\"transformed_x\"),\n (pl.Series(transformed_points[:, 1]) * image_scale).round(2).alias(\"transformed_y\")\n ]).drop([\"x\", \"y\"])\n all_chunks.append(transformed_chunk)\n\n # Concatenate all chunks after processing\n trx = pl.concat(all_chunks)\n\n # Ensure the output directory exists\n if not os.path.exists(path_trx_tiles):\n os.makedirs(path_trx_tiles)\n\n # Get min and max x, y values\n x_min, x_max = trx.select([\n pl.col(\"transformed_x\").min().alias(\"x_min\"),\n pl.col(\"transformed_x\").max().alias(\"x_max\")\n ]).row(0)\n\n y_min, y_max = trx.select([\n pl.col(\"transformed_y\").min().alias(\"y_min\"),\n pl.col(\"transformed_y\").max().alias(\"y_max\")\n ]).row(0)\n\n # Calculate the number of fine-grain tiles globally\n n_fine_tiles_x = int(np.ceil((x_max - x_min) / tile_size))\n n_fine_tiles_y = int(np.ceil((y_max - y_min) / tile_size))\n\n # Calculate the number of coarse-grain tiles\n 
n_coarse_tiles_x = int(np.ceil((x_max - x_min) / (coarse_tile_factor * tile_size)))\n n_coarse_tiles_y = int(np.ceil((y_max - y_min) / (coarse_tile_factor * tile_size)))\n\n # Use ThreadPoolExecutor for parallel processing of coarse-grain tiles\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = []\n for i in range(n_coarse_tiles_x):\n coarse_tile_x_min = x_min + i * (coarse_tile_factor * tile_size)\n coarse_tile_x_max = coarse_tile_x_min + (coarse_tile_factor * tile_size)\n\n for j in range(n_coarse_tiles_y):\n coarse_tile_y_min = y_min + j * (coarse_tile_factor * tile_size)\n coarse_tile_y_max = coarse_tile_y_min + (coarse_tile_factor * tile_size)\n\n # Submit each coarse tile for parallel processing\n futures.append(executor.submit(\n process_coarse_tile, trx, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers\n ))\n\n # Wait for all coarse tiles to complete\n for future in tqdm(concurrent.futures.as_completed(futures), desc=\"Processing coarse tiles\", unit=\"tile\"):\n future.result() # Raise exceptions if any occurred during execution\n\n # Return the tile bounds\n tile_bounds = {\n \"x_min\": x_min,\n \"x_max\": x_max,\n \"y_min\": y_min,\n \"y_max\": y_max,\n }\n\n return tile_bounds\n"},{"location":"python/pre/api/#celldega.pre.reduce_image_size","title":"reduce_image_size(image_path, scale_image=0.5, path_landscape_files='')","text":""},{"location":"python/pre/api/#celldega.pre.reduce_image_size--parameters","title":"Parameters","text":"image_path : str Path to the image file scale_image : float (default=0.5) Scale factor for the image resize"},{"location":"python/pre/api/#celldega.pre.reduce_image_size--returns","title":"Returns","text":"new_image_path : str Path to the resized image file Source code in src/celldega/pre/__init__.py def reduce_image_size(image_path, scale_image=0.5, path_landscape_files=\"\"):\n \"\"\"\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n scale_image : float (default=0.5)\n Scale factor for the image resize\n\n Returns\n -------\n new_image_path : str\n Path to the resized image file\n \"\"\"\n\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n resized_image = image.resize(scale_image)\n\n new_image_name = image_path.split(\"/\")[-1].replace(\".tif\", \"_downsize.tif\")\n new_image_path = f\"{path_landscape_files}/{new_image_name}\"\n resized_image.write_to_file(new_image_path)\n\n return new_image_path\n"},{"location":"python/pre/api/#celldega.pre.save_landscape_parameters","title":"save_landscape_parameters(technology, path_landscape_files, image_name='dapi_files', tile_size=1000, image_info={}, image_format='.webp')","text":"Save the landscape parameters to a JSON file. 
Source code in src/celldega/pre/__init__.py def save_landscape_parameters(\n technology, path_landscape_files, image_name=\"dapi_files\", tile_size=1000, image_info={}, image_format='.webp'\n):\n \"\"\"\n Save the landscape parameters to a JSON file.\n \"\"\"\n\n path_image_pyramid = f\"{path_landscape_files}/pyramid_images/{image_name}\"\n\n print(path_image_pyramid)\n\n max_pyramid_zoom = get_max_zoom_level(path_image_pyramid)\n\n landscape_parameters = {\n \"technology\": technology,\n \"max_pyramid_zoom\": max_pyramid_zoom,\n \"tile_size\": tile_size,\n \"image_info\": image_info,\n \"image_format\": image_format\n }\n\n path_landscape_parameters = f\"{path_landscape_files}/landscape_parameters.json\"\n\n with open(path_landscape_parameters, \"w\") as file:\n json.dump(landscape_parameters, file, indent=4)\n"},{"location":"python/viz/api/","title":"Viz Module API Reference","text":""},{"location":"python/viz/api/#widget-classes","title":"Widget Classes","text":""},{"location":"python/viz/api/#celldega.viz.widget.Landscape","title":"Landscape","text":" Bases: AnyWidget A widget for interactive visualization of spatial omics data. This widget currently supports iST (Xenium and MERSCOPE) and sST (Visium HD data) Parameters: Name Type Description Default ini_x float The initial x-coordinate of the view. required ini_y float The initial y-coordinate of the view. required ini_zoom float The initial zoom level of the view. required token str The token traitlet. required base_url str The base URL for the widget. required dataset_name str The name of the dataset to visualize. This will show up in the user interface bar. required Attributes: Name Type Description component str The name of the component. technology str The technology used. base_url str The base URL for the widget. token str The token traitlet. ini_x float The initial x-coordinate of the view. ini_y float The initial y-coordinate of the view. ini_z float The initial z-coordinate of the view. ini_zoom float The initial zoom level of the view. dataset_name str The name of the dataset to visualize. update_trigger dict The dictionary to trigger updates. cell_clusters dict The dictionary containing cell cluster information. Returns: Name Type Description Landscape A widget for visualizing a 'landscape' view of spatial omics data. Source code in src/celldega/viz/widget.py class Landscape(anywidget.AnyWidget):\n \"\"\"\n A widget for interactive visualization of spatial omics data. This widget\n currently supports iST (Xenium and MERSCOPE) and sST (Visium HD data)\n\n Args:\n ini_x (float): The initial x-coordinate of the view.\n ini_y (float): The initial y-coordinate of the view.\n ini_zoom (float): The initial zoom level of the view.\n token (str): The token traitlet.\n base_url (str): The base URL for the widget.\n dataset_name (str, optional): The name of the dataset to visualize. 
This will show up in the user interface bar.\n\n Attributes:\n component (str): The name of the component.\n technology (str): The technology used.\n base_url (str): The base URL for the widget.\n token (str): The token traitlet.\n ini_x (float): The initial x-coordinate of the view.\n ini_y (float): The initial y-coordinate of the view.\n ini_z (float): The initial z-coordinate of the view.\n ini_zoom (float): The initial zoom level of the view.\n dataset_name (str): The name of the dataset to visualize.\n update_trigger (dict): The dictionary to trigger updates.\n cell_clusters (dict): The dictionary containing cell cluster information.\n\n Returns:\n Landscape: A widget for visualizing a 'landscape' view of spatial omics data.\n \"\"\"\n _esm = pathlib.Path(__file__).parent / \"../static\" / \"widget.js\"\n _css = pathlib.Path(__file__).parent / \"../static\" / \"widget.css\"\n component = traitlets.Unicode(\"Landscape\").tag(sync=True)\n\n technology = traitlets.Unicode(\"sst\").tag(sync=True)\n base_url = traitlets.Unicode(\"\").tag(sync=True)\n token = traitlets.Unicode(\"\").tag(sync=True)\n ini_x = traitlets.Float(1000).tag(sync=True)\n ini_y = traitlets.Float(1000).tag(sync=True)\n ini_z = traitlets.Float(0).tag(sync=True)\n ini_zoom = traitlets.Float(0).tag(sync=True)\n square_tile_size = traitlets.Float(1.4).tag(sync=True)\n dataset_name = traitlets.Unicode(\"\").tag(sync=True)\n region = traitlets.Dict({}).tag(sync=True)\n\n update_trigger = traitlets.Dict().tag(sync=True)\n cell_clusters = traitlets.Dict().tag(sync=True)\n\n width = traitlets.Int(0).tag(sync=True)\n height = traitlets.Int(800).tag(sync=True)\n\n def trigger_update(self, new_value):\n # This method updates the update_trigger traitlet with a new value\n # You can pass any information necessary for the update, or just a timestamp\n self.update_trigger = new_value\n\n def update_cell_clusters(self, new_clusters):\n # Convert the new_clusters to a JSON serializable format if necessary\n self.cell_clusters = new_clusters\n"},{"location":"python/viz/api/#celldega.viz.widget.Matrix","title":"Matrix","text":" Bases: AnyWidget A widget for interactive visualization of a hierarchically clustered matrix. Parameters: Name Type Description Default value int The value traitlet. required component str The component traitlet. required network dict The network traitlet. required click_info dict The click_info traitlet. required Attributes: Name Type Description component str The name of the component. network dict The network dictionary. click_info dict The click_info dictionary. Returns: Name Type Description Matrix A widget for visualizing a hierarchically clustered matrix. 
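A minimal sketch of instantiating the Matrix widget documented above. The network dictionary is assumed to follow the Clustergrammer-style structure expected by the underlying matrix visualization component; here it is loaded from a hypothetical JSON file rather than constructed by hand.

import json
import celldega as dega

# Hypothetical path to a pre-computed, Clustergrammer-style network JSON.
with open("data/network.json") as f:
    network = json.load(f)

mat = dega.viz.Matrix(network=network)
mat  # displaying the widget in a notebook cell renders the clustered matrix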
Source code in src/celldega/viz/widget.py class Matrix(anywidget.AnyWidget):\n \"\"\"\n A widget for interactive visualization of a hierarchically clustered matrix.\n\n Args:\n value (int): The value traitlet.\n component (str): The component traitlet.\n network (dict): The network traitlet.\n click_info (dict): The click_info traitlet.\n\n Attributes:\n component (str): The name of the component.\n network (dict): The network dictionary.\n click_info (dict): The click_info dictionary.\n\n Returns:\n Matrix: A widget for visualizing a hierarchically clustered matrix.\n \"\"\"\n _esm = pathlib.Path(__file__).parent / \"../static\" / \"widget.js\"\n _css = pathlib.Path(__file__).parent / \"../static\" / \"widget.css\"\n value = traitlets.Int(0).tag(sync=True)\n component = traitlets.Unicode(\"Matrix\").tag(sync=True)\n\n network = traitlets.Dict({}).tag(sync=True)\n click_info = traitlets.Dict({}).tag(sync=True)\n"},{"location":"technologies/","title":"Technologies Overview","text":""},{"location":"technologies/parquet/","title":"Parquet","text":""}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Welcome to Celldega's Documentation","text":"Celldega Landscape visualization of a human skin cancer Xenium dataset obtained from 10X Genomics. Celldega is a spatial analysis and visualization library that is being developed by the Spatial Technology Platform at the Broad Institute of MIT and Harvard. This project enables researchers to easily visualize large ST datasets (e.g., datasets with >100M transcripts) alongside single-cell and spatial analysis notebook workflows (e.g., sverse tools and novel spatial analysis approaches). Getting Started Installation Usage "},{"location":"#about","title":"About","text":"Celldega is named after a Bodega, a small shop with all the essentials, that is part of the fabric of a neighborhood."},{"location":"examples/","title":"Jupyter Notebook Examples","text":"Landscape View Xenium"},{"location":"examples/short_notebooks/Landscape_View_Xenium/","title":"Landscape View Xenium","text":"In\u00a0[2]: Copied! # %load_ext autoreload\n# %autoreload 2\n# %env ANYWIDGET_HMR=1\n # %load_ext autoreload # %autoreload 2 # %env ANYWIDGET_HMR=1 In\u00a0[3]: Copied! import celldega as dega\ndega.__version__\n import celldega as dega dega.__version__ Out[3]: '0.0.0' In\u00a0[6]: Copied! from observable_jupyter import embed\n from observable_jupyter import embed In\u00a0[11]: Copied! base_url = 'https://raw.githubusercontent.com/broadinstitute/celldega_Xenium_Prime_Mouse_Brain_Coronal_FF_outs/main/Xenium_Prime_Mouse_Brain_Coronal_FF_outs'\n base_url = 'https://raw.githubusercontent.com/broadinstitute/celldega_Xenium_Prime_Mouse_Brain_Coronal_FF_outs/main/Xenium_Prime_Mouse_Brain_Coronal_FF_outs' In\u00a0[14]: Copied! embed('@cornhundred/celldega-landscape-ist', inputs={'base_url': base_url}, cells=['landscape_container'], display_logo=False)\n embed('@cornhundred/celldega-landscape-ist', inputs={'base_url': base_url}, cells=['landscape_container'], display_logo=False) In\u00a0[13]: Copied! 
# base_url = 'https://raw.githubusercontent.com/broadinstitute/celldega_Xenium_Prime_Human_Skin_FFPE_outs/main/Xenium_Prime_Human_Skin_FFPE_outs'\n\n# landscape_ist = dega.viz.Landscape(\n# technology='Xenium',\n# ini_zoom = -4.5,\n# ini_x=6000,\n# ini_y=8000,\n# base_url = base_url,\n\n# )\n\n# landscape_ist\n # base_url = 'https://raw.githubusercontent.com/broadinstitute/celldega_Xenium_Prime_Human_Skin_FFPE_outs/main/Xenium_Prime_Human_Skin_FFPE_outs' # landscape_ist = dega.viz.Landscape( # technology='Xenium', # ini_zoom = -4.5, # ini_x=6000, # ini_y=8000, # base_url = base_url, # ) # landscape_ist In\u00a0[\u00a0]: Copied! \n"},{"location":"examples/short_notebooks/Landscape_View_Xenium/#landscape-view-xenium","title":"Landscape View Xenium\u00b6","text":""},{"location":"gallery/","title":"Celldega Gallery","text":"This page includes links to visualizations that are made with the stand-alone Celldega JavaScript library."},{"location":"gallery/#imaging-spatial-transcriptomics","title":"Imaging Spatial Transcriptomics","text":""},{"location":"gallery/#xenium","title":"Xenium","text":" Xenium Mouse Brain Xenium Human Skin Cancer "},{"location":"gallery/#sequencing-spatial-transcriptomics","title":"Sequencing Spatial Transcriptomics","text":""},{"location":"gallery/#visium-hd","title":"Visium HD","text":""},{"location":"gallery/gallery_xenium/","title":"Celldega Xenium Gallery","text":""},{"location":"gallery/gallery_xenium/#xenium-prime-mouse-brain-coronal-ff","title":"Xenium Prime Mouse Brain Coronal FF","text":""},{"location":"gallery/gallery_xenium/#xenium-prime-human-skin-ffpe-outs","title":"Xenium Prime Human Skin FFPE outs","text":""},{"location":"gallery/gallery_xenium/#xenium-human-pancreas-ffpe","title":"Xenium Human Pancreas FFPE","text":""},{"location":"gallery/gallery_xenium/#bone-marrow","title":"Bone Marrow","text":""},{"location":"gallery/gallery_xenium_mouse_brain/","title":"Xenium Prime Mouse Brain Coronal FF","text":""},{"location":"gallery/gallery_xenium_multi/","title":"Xenium Multi Dataset","text":""},{"location":"gallery/gallery_xenium_multi/#xenium-prime-mouse-brain-coronal-ff","title":"Xenium Prime Mouse Brain Coronal FF","text":""},{"location":"gallery/gallery_xenium_multi/#xenium-prime-human-skin-ffpe-outs","title":"Xenium Prime Human Skin FFPE outs","text":""},{"location":"gallery/gallery_xenium_multi/#xenium-human-pancreas-ffpe","title":"Xenium Human Pancreas FFPE","text":""},{"location":"gallery/gallery_xenium_multi/#bone-marrow","title":"Bone Marrow","text":""},{"location":"gallery/gallery_xenium_skin_cancer/","title":"Xenium Prime Human Skin FFPE outs","text":""},{"location":"javascript/","title":"JavaScript API Overview","text":"Celldega's visualization methods can be used as a stand-alone JavaScript library outside the context of a Jupyter notebook. This can be used to create showcase visualizations with publicly hosted data."},{"location":"javascript/api/","title":"Celldega JavaScript API Documentation","text":"The JavaScript component of Celldega is used within the Jupyter Widgets framework to provide interactive visualization in the context of a Jupyter notebook but can also be used as a standalone JavaScript library."},{"location":"javascript/api/#landscape_ist-api-documentation","title":"landscape_ist API Documentation","text":"The landscape_ist function initializes and renders an interactive spatial transcriptomics (IST) landscape visualization. 
This API is designed to work with Deck.gl and includes customizable visualization options, dynamic data updates, and UI interactions."},{"location":"javascript/api/#parameters","title":"Parameters","text":" el (HTMLElement): The root DOM element where the visualization is rendered. ini_model (Object): The initial data model containing configuration and state. token (string): Authentication token for accessing data. ini_x, ini_y, ini_z (number): Initial spatial coordinates for the view. ini_zoom (number): Initial zoom level for the visualization. base_url (string): Base URL for accessing data files. dataset_name (string, optional): Name of the dataset being visualized. trx_radius (number, optional): Initial radius for transcript points. Default: 0.25. width (number|string, optional): Width of the visualization. Default: 100%. height (number, optional): Height of the visualization. Default: 800. view_change_custom_callback (Function, optional): Custom callback triggered on view changes. "},{"location":"javascript/api/#public-api","title":"Public API","text":"The landscape_ist function returns an object (landscape) with several methods for interacting with the visualization."},{"location":"javascript/api/#update_matrix_gene","title":"update_matrix_gene","text":"Updates the visualization to highlight data for a specific gene."},{"location":"javascript/api/#parameters_1","title":"Parameters","text":" inst_gene (string): The gene to highlight. "},{"location":"javascript/api/#behavior","title":"Behavior","text":" Updates the transcript layer to show data for the specified gene. Scrolls the bar graph to bring the selected gene into view. Toggles visibility of image layers and controls based on the selected gene. "},{"location":"javascript/api/#update_matrix_col","title":"update_matrix_col","text":"Updates the visualization to highlight data for a specific column (e.g., cluster)."},{"location":"javascript/api/#parameters_2","title":"Parameters","text":" inst_col (string): The column to highlight. "},{"location":"javascript/api/#behavior_1","title":"Behavior","text":" Highlights the bar graph corresponding to the selected column. Updates cell and path layers to reflect the selected column. Toggles visibility of layers based on the column selection. "},{"location":"javascript/api/#update_matrix_dendro_col","title":"update_matrix_dendro_col","text":"Updates the visualization based on a dendrogram selection of columns."},{"location":"javascript/api/#parameters_3","title":"Parameters","text":" selected_cols (Array<string>): The list of selected column names. "},{"location":"javascript/api/#behavior_2","title":"Behavior","text":" Highlights the selected columns in the bar graph. Updates layers to reflect the selection. "},{"location":"javascript/api/#update_view_state","title":"update_view_state","text":"Updates the view state of the Deck.gl visualization."},{"location":"javascript/api/#parameters_4","title":"Parameters","text":" new_view_state (Object): The new view state configuration. close_up (boolean): Whether the view should zoom in closely. trx_layer (Object): The transcript layer to update. "},{"location":"javascript/api/#behavior_3","title":"Behavior","text":" Adjusts the viewport and reconfigures layers based on the new view state. "},{"location":"javascript/api/#update_layers","title":"update_layers","text":"Updates all visualization layers."},{"location":"javascript/api/#behavior_4","title":"Behavior","text":" Refreshes the Deck.gl layers with the current visualization state. 
"},{"location":"javascript/api/#finalize","title":"finalize","text":"Finalizes the Deck.gl instance and cleans up resources."},{"location":"javascript/api/#behavior_5","title":"Behavior","text":" Disposes of all Deck.gl resources and event listeners to prevent memory leaks. "},{"location":"javascript/api/#usage-example","title":"Usage Example","text":"\njavascript\nimport { landscape_ist } from 'path/to/landscape_ist';\n\nconst rootElement = document.getElementById('visualization-container');\nconst model = { /* Model containing visualization data */ };\n\nconst visualization = await landscape_ist(\n rootElement,\n model,\n 'example-token',\n 100,\n 200,\n 0,\n -5,\n 'https://example.com/data',\n 'Example Dataset'\n);\n\n// Update the visualization with a specific gene.\nvisualization.update_matrix_gene('TP53');\n\n// Update the visualization with a specific column.\nvisualization.update_matrix_col('Cluster 1');\n\n// Finalize the visualization when done.\nvisualization.finalize();\n\n"},{"location":"javascript/api/#matrix_viz-api-documentation","title":"matrix_viz API Documentation","text":"The matrix_viz function initializes and renders a matrix visualization. This API is built using approaches and code adaptations from the Clustergrammer-GL library, and it integrates tightly with Deck.gl to provide interactive and dynamic visualizations."},{"location":"javascript/api/#parameters_5","title":"Parameters","text":" model (Object): The model object containing configuration data for the visualization. el (HTMLElement): The root DOM element where the visualization is rendered. network (Object): The network object containing the matrix data to visualize. width (string|number, optional): The width of the visualization. Default: '800'. height (string|number, optional): The height of the visualization. Default: '800'. row_label_callback (Function, optional): A callback function triggered on row label interactions. col_label_callback (Function, optional): A callback function triggered on column label interactions. col_dendro_callback (Function, optional): A callback function triggered on dendrogram column interactions. "},{"location":"javascript/api/#internal-behavior","title":"Internal Behavior","text":"The function performs the following setup: 1. Deck.gl Integration: - Initializes a Deck.gl instance for the matrix visualization. - Sets properties for interactivity, including tooltips, view state changes, and layer filtering. Matrix Data Setup: Parses and structures the matrix data from the network object. Configures labels, categories, and dendrograms for both rows and columns. Layer Initialization: Creates layers for: Matrix cells. Row and column labels. Row and column categories. Row and column dendrograms. Attaches interactions (e.g., click events) to these layers. UI Setup: Creates a container for the visualization and appends it to the root DOM element. 
"},{"location":"javascript/api/#example-usage","title":"Example Usage","text":"import { matrix_viz } from 'path/to/matrix_viz';\n\nconst rootElement = document.getElementById('matrix-container');\nconst model = { /* Model containing visualization data */ };\nconst network = { /* Network object representing the matrix data */ };\n\n// Callback functions\nconst rowLabelCallback = (row) => {\n console.log('Row label clicked:', row);\n};\n\nconst colLabelCallback = (col) => {\n console.log('Column label clicked:', col);\n};\n\nconst colDendroCallback = (dendro) => {\n console.log('Column dendrogram clicked:', dendro);\n};\n\n// Initialize the matrix visualization\nawait matrix_viz(\n model,\n rootElement,\n network,\n 800,\n 800,\n rowLabelCallback,\n colLabelCallback,\n colDendroCallback\n);\n"},{"location":"overview/","title":"Overview","text":"The Celldega library is being developed to help researchers easily visualize and analyze high-dimensional spatial-omics data in the context of a notebook workflow. Initial development has been focused on spatial transcriptomics visualization. Celldega can be used as a Python library in a Jupyter notebook environment or as a stand-alone JavaScript library for creating visualizations. Getting Started Installation Usage "},{"location":"overview/file_formats/","title":"File Formats","text":""},{"location":"overview/file_formats/#landscapefiles","title":"LandscapeFiles","text":""},{"location":"overview/getting_started/","title":"Getting Started","text":"Celldega is a spatial analysis and visualization library that is being developed by the Spatial Technology Platform at the Broad Institute of MIT and Harvard. Celldega can be used as a Jupyter Widget in Python as well as a stand-alone JavaScript library. Please see examples notebooks below to try out Celldega in a Jupyter notebook or ObservableHQ JavaScript notebook: Celldega_Xenium_Landscape_Visualizations_Colab.ipynb Celldega Landscape Xenium ObservableHQ "},{"location":"overview/installation/","title":"Installation","text":""},{"location":"overview/installation/#python","title":"Python","text":"The Celldega library can be installed using pip # install Celldega (without vips for visualization pre-processing)\npip install celldega\n\n# install Celldega with optional pre-processing requirements\npip install celldega[pre]\n\n"},{"location":"overview/installation/#javascript","title":"JavaScript","text":"Celldega can be used in a JavaScript environment such as ObservableHQ by importing it as a module celldega = import('https://unpkg.com/celldega@' + version + '/src/celldega/static/widget.js?module')\n"},{"location":"overview/usage/","title":"Celldega Usage","text":""},{"location":"overview/usage/#terrabio","title":"Terra.bio","text":"** Coming soon **"},{"location":"python/","title":"Python API Overview","text":""},{"location":"python/#pre-module-overview","title":"Pre Module Overview","text":"The pre module contains methods for pre-processing LandscapeFiles."},{"location":"python/#viz-module-overview","title":"Viz Module Overview","text":"The viz module contains functions and classes for data visualization."},{"location":"python/api/","title":"Python API Reference","text":"Module for pre-processing to generate LandscapeFiles from ST data. Module for visualization"},{"location":"python/api/#celldega.pre.convert_long_id_to_short","title":"convert_long_id_to_short(df)","text":"Converts a column of long integer cell IDs in a DataFrame to a shorter, hash-based representation. 
Parameters: Name Type Description Default df DataFrame The DataFrame containing the EntityID. required Returns: pd.DataFrame: The original DataFrame with an additional column named cell_id containing the shortened cell IDs. The function applies a SHA-256 hash to each cell ID, encodes the hash using base64, and truncates it to create a shorter identifier that is added as a new column to the DataFrame. Source code in src/celldega/pre/__init__.py def convert_long_id_to_short(df):\n \"\"\"\n Converts a column of long integer cell IDs in a DataFrame to a shorter, hash-based representation.\n\n Args:\n df (pd.DataFrame): The DataFrame containing the EntityID.\n Returns:\n pd.DataFrame: The original DataFrame with an additional column named `cell_id`\n containing the shortened cell IDs.\n\n The function applies a SHA-256 hash to each cell ID, encodes the hash using base64, and truncates\n it to create a shorter identifier that is added as a new column to the DataFrame.\n \"\"\"\n # Function to hash and encode the cell ID\n def hash_and_shorten_id(cell_id):\n # Create a hash of the cell ID\n cell_id_bytes = str(cell_id).encode('utf-8')\n hash_object = hashlib.sha256(cell_id_bytes)\n hash_digest = hash_object.digest()\n\n # Encode the hash to a base64 string to mix letters and numbers, truncate to 9 characters\n short_id = base64.urlsafe_b64encode(hash_digest).decode('utf-8')[:9]\n return short_id\n\n # Apply the hash_and_shorten_id function to each cell ID in the specified column\n df['cell_id'] = df['EntityID'].apply(hash_and_shorten_id)\n\n return df\n"},{"location":"python/api/#celldega.pre.convert_to_jpeg","title":"convert_to_jpeg(image_path, quality=80)","text":"Convert a TIFF image to a JPEG image with a quality of score"},{"location":"python/api/#celldega.pre.convert_to_jpeg--parameters","title":"Parameters","text":"image_path : str Path to the image file quality : int (default=80) Quality score for the JPEG image"},{"location":"python/api/#celldega.pre.convert_to_jpeg--returns","title":"Returns","text":"new_image_path : str Path to the JPEG image file Source code in src/celldega/pre/__init__.py def convert_to_jpeg(image_path, quality=80):\n \"\"\"\n Convert a TIFF image to a JPEG image with a quality of score\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=80)\n Quality score for the JPEG image\n\n Returns\n -------\n new_image_path : str\n Path to the JPEG image file\n\n \"\"\"\n\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a JPEG with a quality of 80\n new_image_path = image_path.replace(\".tif\", \".jpeg\")\n image.jpegsave(new_image_path, Q=quality)\n\n return new_image_path\n"},{"location":"python/api/#celldega.pre.convert_to_png","title":"convert_to_png(image_path)","text":"Convert a TIFF image to a JPEG image with a quality of score"},{"location":"python/api/#celldega.pre.convert_to_png--parameters","title":"Parameters","text":"image_path : str Path to the image file quality : int (default=80) Quality score for the JPEG image"},{"location":"python/api/#celldega.pre.convert_to_png--returns","title":"Returns","text":"new_image_path : str Path to the JPEG image file Source code in src/celldega/pre/__init__.py def convert_to_png(image_path):\n \"\"\"\n Convert a TIFF image to a JPEG image with a quality of score\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=80)\n Quality score for the JPEG image\n\n Returns\n 
-------\n new_image_path : str\n Path to the JPEG image file\n\n \"\"\"\n\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a JPEG with a quality of 80\n new_image_path = image_path.replace(\".tif\", \".png\")\n image.pngsave(new_image_path)\n\n return new_image_path\n"},{"location":"python/api/#celldega.pre.convert_to_webp","title":"convert_to_webp(image_path, quality=100)","text":"Convert a TIFF image to a WEBP image with a specified quality score."},{"location":"python/api/#celldega.pre.convert_to_webp--parameters","title":"Parameters","text":"image_path : str Path to the image file quality : int (default=100) Quality score for the WEBP image (higher is better quality)"},{"location":"python/api/#celldega.pre.convert_to_webp--returns","title":"Returns","text":"new_image_path : str Path to the WEBP image file Source code in src/celldega/pre/__init__.py def convert_to_webp(image_path, quality=100):\n \"\"\"\n Convert a TIFF image to a WEBP image with a specified quality score.\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=100)\n Quality score for the WEBP image (higher is better quality)\n\n Returns\n -------\n new_image_path : str\n Path to the WEBP image file\n \"\"\"\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a WEBP with specified quality\n new_image_path = image_path.replace(\".tif\", \".webp\")\n image.webpsave(new_image_path, Q=quality)\n\n return new_image_path\n"},{"location":"python/api/#celldega.pre.get_max_zoom_level","title":"get_max_zoom_level(path_image_pyramid)","text":"Returns the maximum zoom level based on the highest-numbered directory in the specified path_image_pyramid. Parameters: Name Type Description Default path_image_pyramid str The path to the directory containing zoom level directories. required Returns: Name Type Description max_pyramid_zoom int The maximum zoom level. Source code in src/celldega/pre/__init__.py def get_max_zoom_level(path_image_pyramid):\n \"\"\"\n Returns the maximum zoom level based on the highest-numbered directory\n in the specified path_image_pyramid.\n\n Parameters:\n path_image_pyramid (str): The path to the directory containing zoom level directories.\n\n Returns:\n max_pyramid_zoom (int): The maximum zoom level.\n \"\"\"\n # List all entries in the path_image_pyramid that are directories and can be converted to integers\n zoom_levels = [\n entry\n for entry in os.listdir(path_image_pyramid)\n if os.path.isdir(os.path.join(path_image_pyramid, entry)) and entry.isdigit()\n ]\n\n # Convert to integer and find the maximum value\n max_pyramid_zoom = max(map(int, zoom_levels)) if zoom_levels else None\n\n return max_pyramid_zoom\n"},{"location":"python/api/#celldega.pre.make_cell_boundary_tiles","title":"make_cell_boundary_tiles(technology, path_cell_boundaries, path_meta_cell_micron, path_transformation_matrix, path_output, coarse_tile_factor=20, tile_size=250, tile_bounds=None, image_scale=1, max_workers=8)","text":"Processes cell boundary data and divides it into spatial tiles based on the provided technology. Reads cell boundary data, applies affine transformations, and divides the data into coarse and fine tiles. 
The resulting tiles are saved as Parquet files, each containing the geometries of cells in that tile."},{"location":"python/api/#celldega.pre.make_cell_boundary_tiles--parameters","title":"Parameters","text":"technology : str The technology used to generate the cell boundary data, e.g., \"MERSCOPE\", \"Xenium\", or \"custom\". path_cell_boundaries : str Path to the file containing the cell boundaries (Parquet format). path_meta_cell_micron : str Path to the file containing cell metadata (CSV format). path_transformation_matrix : str Path to the file containing the transformation matrix (CSV format). path_output : str Directory path where the output files (Parquet files) for each tile will be saved. coarse_tile_factor : int, optional, default=20. scaling factor of each coarse-grain tile comparing to the fine tile size. tile_size : int, optional, default=500 Size of each fine-grain tile in microns. tile_bounds : dict, optional Dictionary containing the minimum and maximum bounds for x and y coordinates. image_scale : float, optional, default=1 Scale factor to apply to the geometry data. max_workers : int, optional, default=8 Maximum number of parallel workers for processing tiles."},{"location":"python/api/#celldega.pre.make_cell_boundary_tiles--returns","title":"Returns","text":"None Source code in src/celldega/pre/boundary_tile.py def make_cell_boundary_tiles(\n technology,\n path_cell_boundaries,\n path_meta_cell_micron,\n path_transformation_matrix,\n path_output,\n coarse_tile_factor=20,\n tile_size=250,\n tile_bounds=None,\n image_scale=1,\n max_workers=8\n):\n\n\n \"\"\"\n Processes cell boundary data and divides it into spatial tiles based on the provided technology.\n Reads cell boundary data, applies affine transformations, and divides the data into coarse and fine tiles.\n The resulting tiles are saved as Parquet files, each containing the geometries of cells in that tile.\n\n Parameters\n ----------\n technology : str\n The technology used to generate the cell boundary data, e.g., \"MERSCOPE\", \"Xenium\", or \"custom\".\n path_cell_boundaries : str\n Path to the file containing the cell boundaries (Parquet format).\n path_meta_cell_micron : str\n Path to the file containing cell metadata (CSV format).\n path_transformation_matrix : str\n Path to the file containing the transformation matrix (CSV format).\n path_output : str\n Directory path where the output files (Parquet files) for each tile will be saved.\n coarse_tile_factor : int, optional, default=20.\n scaling factor of each coarse-grain tile comparing to the fine tile size.\n tile_size : int, optional, default=500\n Size of each fine-grain tile in microns.\n tile_bounds : dict, optional\n Dictionary containing the minimum and maximum bounds for x and y coordinates.\n image_scale : float, optional, default=1\n Scale factor to apply to the geometry data.\n max_workers : int, optional, default=8\n Maximum number of parallel workers for processing tiles.\n\n Returns\n -------\n None\n \"\"\"\n\n def numpy_affine_transform(coords, matrix):\n \"\"\"Apply affine transformation to numpy coordinates.\"\"\"\n # Homogeneous coordinates for affine transformation\n coords = np.hstack([coords, np.ones((coords.shape[0], 1))])\n transformed_coords = coords @ matrix.T\n return transformed_coords[:, :2] # Drop the homogeneous coordinate\n\n def batch_transform_geometries(geometries, transformation_matrix, scale):\n \"\"\"\n Batch transform geometries using numpy for optimized performance.\n \"\"\"\n # Extract affine transformation 
parameters into a 3x3 matrix for numpy\n affine_matrix = np.array([\n [transformation_matrix[0, 0], transformation_matrix[0, 1], transformation_matrix[0, 2]],\n [transformation_matrix[1, 0], transformation_matrix[1, 1], transformation_matrix[1, 2]],\n [0, 0, 1]\n ])\n\n transformed_geometries = []\n\n for polygon in geometries:\n # Extract coordinates and transform them\n if isinstance(polygon, MultiPolygon):\n polygon = next(polygon.geoms) # Use the first geometry\n\n # Transform the exterior of the polygon\n exterior_coords = np.array(polygon.exterior.coords)\n\n # Apply the affine transformation and scale\n transformed_coords = numpy_affine_transform(exterior_coords, affine_matrix) / scale\n\n # Append the result to the transformed_geometries list\n transformed_geometries.append([transformed_coords.tolist()])\n\n return transformed_geometries\n\n\n def filter_and_save_fine_boundary(coarse_tile, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_output):\n cell_ids = coarse_tile.index.values\n\n tile_filter = (\n (coarse_tile[\"center_x\"] >= fine_tile_x_min) & (coarse_tile[\"center_x\"] < fine_tile_x_max) &\n (coarse_tile[\"center_y\"] >= fine_tile_y_min) & (coarse_tile[\"center_y\"] < fine_tile_y_max)\n )\n filtered_indices = np.where(tile_filter)[0]\n\n keep_cells = cell_ids[filtered_indices]\n fine_tile_cells = coarse_tile.loc[keep_cells, [\"GEOMETRY\"]]\n fine_tile_cells = fine_tile_cells.assign(name=fine_tile_cells.index)\n\n if not fine_tile_cells.empty:\n filename = f\"{path_output}/cell_tile_{fine_i}_{fine_j}.parquet\"\n fine_tile_cells.to_parquet(filename)\n\n def process_fine_boundaries(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_output, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y):\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = []\n for fine_i in range(n_fine_tiles_x):\n fine_tile_x_min = x_min + fine_i * tile_size\n fine_tile_x_max = fine_tile_x_min + tile_size\n\n if not (fine_tile_x_min >= coarse_tile_x_min and fine_tile_x_max <= coarse_tile_x_max):\n continue\n\n for fine_j in range(n_fine_tiles_y):\n fine_tile_y_min = y_min + fine_j * tile_size\n fine_tile_y_max = fine_tile_y_min + tile_size\n\n if not (fine_tile_y_min >= coarse_tile_y_min and fine_tile_y_max <= coarse_tile_y_max):\n continue\n\n futures.append(executor.submit(\n filter_and_save_fine_boundary, coarse_tile, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_output\n ))\n\n for future in futures:\n future.result()\n\n tile_size_x = tile_size\n tile_size_y = tile_size\n\n transformation_matrix = pd.read_csv(path_transformation_matrix, header=None, sep=\" \").values\n\n # Load cell boundary data based on the technology\n if technology == \"MERSCOPE\":\n df_meta = pd.read_parquet(f\"{path_output.replace('cell_segmentation','cell_metadata.parquet')}\")\n entity_to_cell_id_dict = pd.Series(df_meta.index.values, index=df_meta.EntityID).to_dict()\n cells_orig = gpd.read_parquet(path_cell_boundaries)\n cells_orig['cell_id'] = cells_orig['EntityID'].map(entity_to_cell_id_dict)\n cells_orig = cells_orig[cells_orig[\"ZIndex\"] == 1]\n\n # Correct cell_id issues with meta_cell\n meta_cell = pd.read_csv(path_meta_cell_micron)\n meta_cell['cell_id'] = meta_cell['EntityID'].map(entity_to_cell_id_dict)\n cells_orig.index = meta_cell[meta_cell[\"cell_id\"].isin(cells_orig['cell_id'])].index\n\n # Correct 'MultiPolygon' to 'Polygon'\n 
cells_orig[\"geometry\"] = cells_orig[\"Geometry\"].apply(\n lambda x: list(x.geoms)[0] if isinstance(x, MultiPolygon) else x\n )\n\n cells_orig.set_index('cell_id', inplace=True)\n\n elif technology == \"Xenium\":\n xenium_cells = pd.read_parquet(path_cell_boundaries)\n grouped = xenium_cells.groupby(\"cell_id\")[[\"vertex_x\", \"vertex_y\"]].agg(lambda x: x.tolist())\n grouped[\"geometry\"] = grouped.apply(lambda row: Polygon(zip(row[\"vertex_x\"], row[\"vertex_y\"])), axis=1)\n cells_orig = gpd.GeoDataFrame(grouped, geometry=\"geometry\")[[\"geometry\"]]\n\n elif technology == \"custom\":\n cells_orig = gpd.read_parquet(path_cell_boundaries)\n\n # Transform geometries\n cells_orig[\"GEOMETRY\"] = batch_transform_geometries(cells_orig[\"geometry\"], transformation_matrix, image_scale)\n\n # Convert transformed geometries to polygons and calculate centroids\n cells_orig[\"polygon\"] = cells_orig[\"GEOMETRY\"].apply(lambda x: Polygon(x[0]))\n gdf_cells = gpd.GeoDataFrame(geometry=cells_orig[\"polygon\"])\n gdf_cells[\"center_x\"] = gdf_cells.geometry.centroid.x\n gdf_cells[\"center_y\"] = gdf_cells.geometry.centroid.y\n gdf_cells[\"GEOMETRY\"] = cells_orig[\"GEOMETRY\"]\n\n # Ensure the output directory exists\n if not os.path.exists(path_output):\n os.makedirs(path_output)\n\n # Calculate tile bounds and fine/coarse tiles\n x_min, x_max = tile_bounds[\"x_min\"], tile_bounds[\"x_max\"]\n y_min, y_max = tile_bounds[\"y_min\"], tile_bounds[\"y_max\"]\n n_fine_tiles_x = int(np.ceil((x_max - x_min) / tile_size))\n n_fine_tiles_y = int(np.ceil((y_max - y_min) / tile_size))\n n_coarse_tiles_x = int(np.ceil((x_max - x_min) / (coarse_tile_factor * tile_size)))\n n_coarse_tiles_y = int(np.ceil((y_max - y_min) / (coarse_tile_factor * tile_size)))\n\n # Process coarse tiles in parallel\n for i in tqdm(range(n_coarse_tiles_x), desc=\"Processing coarse tiles\"):\n coarse_tile_x_min = x_min + i * (coarse_tile_factor * tile_size)\n coarse_tile_x_max = coarse_tile_x_min + (coarse_tile_factor * tile_size)\n\n for j in range(n_coarse_tiles_y):\n coarse_tile_y_min = y_min + j * (coarse_tile_factor * tile_size)\n coarse_tile_y_max = coarse_tile_y_min + (coarse_tile_factor * tile_size)\n\n coarse_tile = gdf_cells[\n (gdf_cells[\"center_x\"] >= coarse_tile_x_min) & (gdf_cells[\"center_x\"] < coarse_tile_x_max) &\n (gdf_cells[\"center_y\"] >= coarse_tile_y_min) & (gdf_cells[\"center_y\"] < coarse_tile_y_max)\n ]\n if not coarse_tile.empty:\n process_fine_boundaries(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_output, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y)\n"},{"location":"python/api/#celldega.pre.make_deepzoom_pyramid","title":"make_deepzoom_pyramid(image_path, output_path, pyramid_name, tile_size=512, overlap=0, suffix='.jpeg')","text":"Create a DeepZoom image pyramid from a JPEG image"},{"location":"python/api/#celldega.pre.make_deepzoom_pyramid--parameters","title":"Parameters","text":"image_path : str Path to the JPEG image file tile_size : int (default=512) Tile size for the DeepZoom pyramid overlap : int (default=0) Overlap size for the DeepZoom pyramid suffix : str (default='jpeg') Suffix for the DeepZoom pyramid tiles"},{"location":"python/api/#celldega.pre.make_deepzoom_pyramid--returns","title":"Returns","text":"None Source code in src/celldega/pre/__init__.py def make_deepzoom_pyramid(\n image_path, output_path, pyramid_name, tile_size=512, overlap=0, suffix=\".jpeg\"\n):\n \"\"\"\n Create a DeepZoom image pyramid from a JPEG 
image\n\n Parameters\n ----------\n image_path : str\n Path to the JPEG image file\n tile_size : int (default=512)\n Tile size for the DeepZoom pyramid\n overlap : int (default=0)\n Overlap size for the DeepZoom pyramid\n suffix : str (default='jpeg')\n Suffix for the DeepZoom pyramid tiles\n\n Returns\n -------\n None\n\n \"\"\"\n\n # Define the output path\n output_path = Path(output_path)\n\n # Load the JPEG image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # check if the output path exists and create it if it does not\n output_path.mkdir(parents=True, exist_ok=True)\n\n # append the pyramid name to the output path\n output_path = output_path / pyramid_name\n\n # Save the image as a DeepZoom image pyramid\n image.dzsave(output_path, tile_size=tile_size, overlap=overlap, suffix=suffix)\n"},{"location":"python/api/#celldega.pre.make_meta_cell_image_coord","title":"make_meta_cell_image_coord(technology, path_transformation_matrix, path_meta_cell_micron, path_meta_cell_image, image_scale)","text":"Apply an affine transformation to the cell coordinates in microns and save the transformed coordinates in pixels"},{"location":"python/api/#celldega.pre.make_meta_cell_image_coord--parameters","title":"Parameters","text":"technology : str The technology used to generate the data, Xenium and MERSCOPE are supported. path_transformation_matrix : str Path to the transformation matrix file path_meta_cell_micron : str Path to the meta cell file with coordinates in microns path_meta_cell_image : str Path to save the meta cell file with coordinates in pixels"},{"location":"python/api/#celldega.pre.make_meta_cell_image_coord--returns","title":"Returns","text":"None"},{"location":"python/api/#celldega.pre.make_meta_cell_image_coord--examples","title":"Examples","text":"make_meta_cell_image_coord( ... technology='Xenium', ... path_transformation_matrix='data/transformation_matrix.txt', ... path_meta_cell_micron='data/meta_cell_micron.csv', ... path_meta_cell_image='data/meta_cell_image.parquet' ... ) Source code in src/celldega/pre/__init__.py def make_meta_cell_image_coord(\n technology,\n path_transformation_matrix,\n path_meta_cell_micron,\n path_meta_cell_image,\n image_scale\n):\n \"\"\"\n Apply an affine transformation to the cell coordinates in microns and save\n the transformed coordinates in pixels\n\n Parameters\n ----------\n technology : str\n The technology used to generate the data, Xenium and MERSCOPE are supported.\n path_transformation_matrix : str\n Path to the transformation matrix file\n path_meta_cell_micron : str\n Path to the meta cell file with coordinates in microns\n path_meta_cell_image : str\n Path to save the meta cell file with coordinates in pixels\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> make_meta_cell_image_coord(\n ... technology='Xenium',\n ... path_transformation_matrix='data/transformation_matrix.txt',\n ... path_meta_cell_micron='data/meta_cell_micron.csv',\n ... path_meta_cell_image='data/meta_cell_image.parquet'\n ... 
)\n\n \"\"\"\n\n transformation_matrix = pd.read_csv(\n path_transformation_matrix, header=None, sep=\" \"\n ).values\n\n if technology == \"MERSCOPE\":\n meta_cell = pd.read_csv(path_meta_cell_micron, usecols=[\"EntityID\", \"center_x\", \"center_y\"])\n meta_cell = convert_long_id_to_short(meta_cell)\n meta_cell[\"name\"] = meta_cell[\"cell_id\"]\n meta_cell = meta_cell.set_index('cell_id')\n elif technology == \"Xenium\":\n usecols = [\"cell_id\", \"x_centroid\", \"y_centroid\"]\n meta_cell = pd.read_csv(path_meta_cell_micron, index_col=0, usecols=usecols)\n meta_cell.columns = [\"center_x\", \"center_y\"]\n meta_cell[\"name\"] = pd.Series(meta_cell.index, index=meta_cell.index)\n\n # Adding a ones column to accommodate for affine transformation\n meta_cell[\"ones\"] = 1\n\n # Preparing the data for matrix multiplication\n points = meta_cell[[\"center_x\", \"center_y\", \"ones\"]].values\n\n # Applying the transformation matrix\n transformed_points = np.dot(transformation_matrix, points.T).T\n\n # Updating the DataFrame with transformed coordinates\n meta_cell[\"center_x\"] = transformed_points[:, 0]\n meta_cell[\"center_y\"] = transformed_points[:, 1]\n\n # Dropping the ones column as it's no longer needed\n meta_cell.drop(columns=[\"ones\"], inplace=True)\n\n meta_cell[\"center_x\"] = meta_cell[\"center_x\"] / image_scale\n meta_cell[\"center_y\"] = meta_cell[\"center_y\"] / image_scale\n\n meta_cell[\"geometry\"] = meta_cell.apply(\n lambda row: [row[\"center_x\"], row[\"center_y\"]], axis=1\n )\n\n if technology == \"MERSCOPE\":\n meta_cell = meta_cell[[\"name\", \"geometry\", \"EntityID\"]]\n else:\n meta_cell = meta_cell[[\"name\", \"geometry\"]]\n\n\n meta_cell.to_parquet(path_meta_cell_image)\n"},{"location":"python/api/#celldega.pre.make_meta_gene","title":"make_meta_gene(technology, path_cbg, path_output)","text":"Create a DataFrame with genes and their assigned colors"},{"location":"python/api/#celldega.pre.make_meta_gene--parameters","title":"Parameters","text":"technology : str The technology used to generate the data, Xenium and MERSCOPE are supported. path_cbg : str Path to the cell-by-gene matrix data (the data format can vary based on technology) path_output : str Path to save the meta gene file"},{"location":"python/api/#celldega.pre.make_meta_gene--returns","title":"Returns","text":"None"},{"location":"python/api/#celldega.pre.make_meta_gene--examples","title":"Examples","text":"make_meta_gene( ... technology='Xenium', ... path_cbg='data/', ... path_output='data/meta_gene.parquet' ... ) Source code in src/celldega/pre/__init__.py def make_meta_gene(technology, path_cbg, path_output):\n \"\"\"\n Create a DataFrame with genes and their assigned colors\n\n Parameters\n ----------\n technology : str\n The technology used to generate the data, Xenium and MERSCOPE are supported.\n path_cbg : str\n Path to the cell-by-gene matrix data (the data format can vary based on technology)\n path_output : str\n Path to save the meta gene file\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> make_meta_gene(\n ... technology='Xenium',\n ... path_cbg='data/',\n ... path_output='data/meta_gene.parquet'\n ... 
)\n \"\"\"\n\n if technology == \"MERSCOPE\":\n cbg = pd.read_csv(path_cbg, index_col=0)\n genes = cbg.columns.tolist()\n elif technology == \"Xenium\":\n # genes = pd.read_csv(path_cbg + 'features.tsv.gz', sep='\\t', header=None)[1].values.tolist()\n cbg = read_cbg_mtx(path_cbg)\n genes = cbg.columns.tolist()\n\n # Get all categorical color palettes from Matplotlib and flatten them into a single list of colors\n palettes = [plt.get_cmap(name).colors for name in plt.colormaps() if \"tab\" in name]\n flat_colors = [color for palette in palettes for color in palette]\n\n # Convert RGB tuples to hex codes\n flat_colors_hex = [to_hex(color) for color in flat_colors]\n\n # Use modular arithmetic to assign a color to each gene, white for genes with \"Blank\"\n colors = [\n flat_colors_hex[i % len(flat_colors_hex)] if \"Blank\" not in gene else \"#FFFFFF\"\n for i, gene in enumerate(genes)\n ]\n\n # Create a DataFrame with genes and their assigned colors\n ser_color = pd.Series(colors, index=genes)\n\n # calculate gene expression metadata\n meta_gene = calc_meta_gene_data(cbg)\n meta_gene['color'] = ser_color\n\n # Identify sparse columns\n sparse_cols = [col for col in meta_gene.columns if pd.api.types.is_sparse(meta_gene[col])]\n\n # Convert sparse columns to dense\n for col in sparse_cols:\n meta_gene[col] = meta_gene[col].sparse.to_dense()\n\n meta_gene.to_parquet(path_output)\n"},{"location":"python/api/#celldega.pre.make_trx_tiles","title":"make_trx_tiles(technology, path_trx, path_transformation_matrix, path_trx_tiles, coarse_tile_factor=10, tile_size=250, chunk_size=1000000, verbose=False, image_scale=1, max_workers=8)","text":"Processes transcript data by dividing it into coarse-grain and fine-grain tiles, applying transformations, and saving the results in a parallelized manner."},{"location":"python/api/#celldega.pre.make_trx_tiles--parameters","title":"Parameters","text":"technology : str The technology used for generating the transcript data (e.g., \"MERSCOPE\" or \"Xenium\"). path_trx : str Path to the file containing the transcript data. path_transformation_matrix : str Path to the file containing the transformation matrix (CSV file). path_trx_tiles : str Directory path where the output files (Parquet files) for each tile will be saved. coarse_tile_factor : int, optional Scaling factor of each coarse-grain tile comparing to the fine tile size. tile_size : int, optional Size of each fine-grain tile in microns (default is 250). chunk_size : int, optional Number of rows to process per chunk for memory efficiency (default is 1000000). verbose : bool, optional Flag to enable verbose output (default is False). image_scale : float, optional Scale factor to apply to the transcript coordinates (default is 0.5). max_workers : int, optional Maximum number of parallel workers for processing tiles (default is 8)."},{"location":"python/api/#celldega.pre.make_trx_tiles--returns","title":"Returns","text":"dict A dictionary containing the bounds of the processed data in both x and y directions. 
Source code in src/celldega/pre/trx_tile.py def make_trx_tiles(\n technology,\n path_trx,\n path_transformation_matrix,\n path_trx_tiles,\n coarse_tile_factor=10,\n tile_size=250,\n chunk_size=1000000,\n verbose=False,\n image_scale=1,\n max_workers=8\n):\n \"\"\"\n Processes transcript data by dividing it into coarse-grain and fine-grain tiles,\n applying transformations, and saving the results in a parallelized manner.\n\n Parameters\n ----------\n technology : str\n The technology used for generating the transcript data (e.g., \"MERSCOPE\" or \"Xenium\").\n path_trx : str\n Path to the file containing the transcript data.\n path_transformation_matrix : str\n Path to the file containing the transformation matrix (CSV file).\n path_trx_tiles : str\n Directory path where the output files (Parquet files) for each tile will be saved.\n coarse_tile_factor : int, optional\n Scaling factor of each coarse-grain tile comparing to the fine tile size.\n tile_size : int, optional\n Size of each fine-grain tile in microns (default is 250).\n chunk_size : int, optional\n Number of rows to process per chunk for memory efficiency (default is 1000000).\n verbose : bool, optional\n Flag to enable verbose output (default is False).\n image_scale : float, optional\n Scale factor to apply to the transcript coordinates (default is 0.5).\n max_workers : int, optional\n Maximum number of parallel workers for processing tiles (default is 8).\n\n Returns\n -------\n dict\n A dictionary containing the bounds of the processed data in both x and y directions.\n \"\"\"\n\n def process_coarse_tile(trx, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers):\n # Filter the entire dataset for the current coarse tile\n coarse_tile = trx.filter(\n (pl.col(\"transformed_x\") >= coarse_tile_x_min) & (pl.col(\"transformed_x\") < coarse_tile_x_max) &\n (pl.col(\"transformed_y\") >= coarse_tile_y_min) & (pl.col(\"transformed_y\") < coarse_tile_y_max)\n )\n\n if not coarse_tile.is_empty():\n # Now process fine tiles using global fine tile indices\n process_fine_tiles(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers) \n\n\n def process_fine_tiles(coarse_tile, coarse_i, coarse_j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers=8):\n\n # Use ThreadPoolExecutor for parallel processing of fine-grain tiles within the coarse tile\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = []\n\n # Iterate over fine-grain tiles within the global bounds\n for fine_i in range(n_fine_tiles_x):\n fine_tile_x_min = x_min + fine_i * tile_size\n fine_tile_x_max = fine_tile_x_min + tile_size\n\n # Process only if the fine tile falls within the current coarse tile's bounds\n if not (fine_tile_x_min >= coarse_tile_x_min and fine_tile_x_max <= coarse_tile_x_max):\n continue\n\n for fine_j in range(n_fine_tiles_y):\n fine_tile_y_min = y_min + fine_j * tile_size\n fine_tile_y_max = fine_tile_y_min + tile_size\n\n # Process only if the fine tile falls within the current coarse tile's bounds\n if not (fine_tile_y_min >= coarse_tile_y_min and fine_tile_y_max <= coarse_tile_y_max):\n continue\n\n # Submit the task for each fine tile to process in parallel\n 
futures.append(executor.submit(\n filter_and_save_fine_tile, coarse_tile, coarse_i, coarse_j, fine_i, fine_j, \n fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_trx_tiles\n ))\n\n # Wait for all futures to complete\n for future in concurrent.futures.as_completed(futures):\n future.result() # Raise exceptions if any occurred during execution\n\n\n def filter_and_save_fine_tile(coarse_tile, coarse_i, coarse_j, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_trx_tiles):\n\n # Filter the coarse tile for the current fine tile's boundaries\n fine_tile_trx = coarse_tile.filter(\n (pl.col(\"transformed_x\") >= fine_tile_x_min) & (pl.col(\"transformed_x\") < fine_tile_x_max) &\n (pl.col(\"transformed_y\") >= fine_tile_y_min) & (pl.col(\"transformed_y\") < fine_tile_y_max)\n )\n\n if not fine_tile_trx.is_empty():\n # Add geometry column as a list of [x, y] pairs\n fine_tile_trx = fine_tile_trx.with_columns(\n pl.concat_list([pl.col(\"transformed_x\"), pl.col(\"transformed_y\")]).alias(\"geometry\")\n ).drop(['transformed_x', 'transformed_y'])\n\n # Define the filename based on fine tile coordinates\n filename = f\"{path_trx_tiles}/transcripts_tile_{fine_i}_{fine_j}.parquet\"\n\n # Save the filtered DataFrame to a Parquet file\n fine_tile_trx.to_pandas().to_parquet(filename)\n\n\n # Load transformation matrix\n transformation_matrix = np.loadtxt(path_transformation_matrix)\n\n # Load the transcript data based on the technology using Polars\n if technology == \"MERSCOPE\":\n trx_ini = pl.read_csv(path_trx, columns=[\"gene\", \"global_x\", \"global_y\"])\n trx_ini = trx_ini.with_columns([\n pl.col(\"global_x\").alias(\"x\"),\n pl.col(\"global_y\").alias(\"y\"),\n pl.col(\"gene\").alias(\"name\")\n ]).select([\"name\", \"x\", \"y\"])\n\n elif technology == \"Xenium\":\n trx_ini = pl.read_parquet(path_trx).select([\n pl.col(\"feature_name\").alias(\"name\"),\n pl.col(\"x_location\").alias(\"x\"),\n pl.col(\"y_location\").alias(\"y\")\n ])\n\n # Process the data in chunks and apply transformations\n all_chunks = []\n\n for start_row in tqdm(range(0, trx_ini.height, chunk_size), desc=\"Processing chunks\"):\n chunk = trx_ini.slice(start_row, chunk_size)\n\n # Apply transformation matrix to the coordinates\n points = np.hstack([chunk.select([\"x\", \"y\"]).to_numpy(), np.ones((chunk.height, 1))])\n transformed_points = np.dot(points, transformation_matrix.T)[:, :2]\n\n # Create new transformed columns and drop original x, y columns\n transformed_chunk = chunk.with_columns([\n (pl.Series(transformed_points[:, 0]) * image_scale).round(2).alias(\"transformed_x\"),\n (pl.Series(transformed_points[:, 1]) * image_scale).round(2).alias(\"transformed_y\")\n ]).drop([\"x\", \"y\"])\n all_chunks.append(transformed_chunk)\n\n # Concatenate all chunks after processing\n trx = pl.concat(all_chunks)\n\n # Ensure the output directory exists\n if not os.path.exists(path_trx_tiles):\n os.makedirs(path_trx_tiles)\n\n # Get min and max x, y values\n x_min, x_max = trx.select([\n pl.col(\"transformed_x\").min().alias(\"x_min\"),\n pl.col(\"transformed_x\").max().alias(\"x_max\")\n ]).row(0)\n\n y_min, y_max = trx.select([\n pl.col(\"transformed_y\").min().alias(\"y_min\"),\n pl.col(\"transformed_y\").max().alias(\"y_max\")\n ]).row(0)\n\n # Calculate the number of fine-grain tiles globally\n n_fine_tiles_x = int(np.ceil((x_max - x_min) / tile_size))\n n_fine_tiles_y = int(np.ceil((y_max - y_min) / tile_size))\n\n # Calculate the number of coarse-grain tiles\n 
n_coarse_tiles_x = int(np.ceil((x_max - x_min) / (coarse_tile_factor * tile_size)))\n n_coarse_tiles_y = int(np.ceil((y_max - y_min) / (coarse_tile_factor * tile_size)))\n\n # Use ThreadPoolExecutor for parallel processing of coarse-grain tiles\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = []\n for i in range(n_coarse_tiles_x):\n coarse_tile_x_min = x_min + i * (coarse_tile_factor * tile_size)\n coarse_tile_x_max = coarse_tile_x_min + (coarse_tile_factor * tile_size)\n\n for j in range(n_coarse_tiles_y):\n coarse_tile_y_min = y_min + j * (coarse_tile_factor * tile_size)\n coarse_tile_y_max = coarse_tile_y_min + (coarse_tile_factor * tile_size)\n\n # Submit each coarse tile for parallel processing\n futures.append(executor.submit(\n process_coarse_tile, trx, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers\n ))\n\n # Wait for all coarse tiles to complete\n for future in tqdm(concurrent.futures.as_completed(futures), desc=\"Processing coarse tiles\", unit=\"tile\"):\n future.result() # Raise exceptions if any occurred during execution\n\n # Return the tile bounds\n tile_bounds = {\n \"x_min\": x_min,\n \"x_max\": x_max,\n \"y_min\": y_min,\n \"y_max\": y_max,\n }\n\n return tile_bounds\n"},{"location":"python/api/#celldega.pre.reduce_image_size","title":"reduce_image_size(image_path, scale_image=0.5, path_landscape_files='')","text":""},{"location":"python/api/#celldega.pre.reduce_image_size--parameters","title":"Parameters","text":"image_path : str Path to the image file scale_image : float (default=0.5) Scale factor for the image resize"},{"location":"python/api/#celldega.pre.reduce_image_size--returns","title":"Returns","text":"new_image_path : str Path to the resized image file Source code in src/celldega/pre/__init__.py def reduce_image_size(image_path, scale_image=0.5, path_landscape_files=\"\"):\n \"\"\"\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n scale_image : float (default=0.5)\n Scale factor for the image resize\n\n Returns\n -------\n new_image_path : str\n Path to the resized image file\n \"\"\"\n\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n resized_image = image.resize(scale_image)\n\n new_image_name = image_path.split(\"/\")[-1].replace(\".tif\", \"_downsize.tif\")\n new_image_path = f\"{path_landscape_files}/{new_image_name}\"\n resized_image.write_to_file(new_image_path)\n\n return new_image_path\n"},{"location":"python/api/#celldega.pre.save_landscape_parameters","title":"save_landscape_parameters(technology, path_landscape_files, image_name='dapi_files', tile_size=1000, image_info={}, image_format='.webp')","text":"Save the landscape parameters to a JSON file. 
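A short sketch of calling save_landscape_parameters is shown below; the LandscapeFiles path is a hypothetical placeholder and assumes an image pyramid has already been written under pyramid_images/dapi_files.

```python
import celldega as dega

# Hypothetical LandscapeFiles directory containing a pre-built DeepZoom pyramid
# at <path_landscape_files>/pyramid_images/dapi_files
dega.pre.save_landscape_parameters(
    technology="Xenium",
    path_landscape_files="Xenium_outs/landscape_files",
    image_name="dapi_files",
    image_format=".webp",
)
# Writes Xenium_outs/landscape_files/landscape_parameters.json containing the
# technology, maximum pyramid zoom level, tile size, and image format
```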
Source code in src/celldega/pre/__init__.py def save_landscape_parameters(\n technology, path_landscape_files, image_name=\"dapi_files\", tile_size=1000, image_info={}, image_format='.webp'\n):\n \"\"\"\n Save the landscape parameters to a JSON file.\n \"\"\"\n\n path_image_pyramid = f\"{path_landscape_files}/pyramid_images/{image_name}\"\n\n print(path_image_pyramid)\n\n max_pyramid_zoom = get_max_zoom_level(path_image_pyramid)\n\n landscape_parameters = {\n \"technology\": technology,\n \"max_pyramid_zoom\": max_pyramid_zoom,\n \"tile_size\": tile_size,\n \"image_info\": image_info,\n \"image_format\": image_format\n }\n\n path_landscape_parameters = f\"{path_landscape_files}/landscape_parameters.json\"\n\n with open(path_landscape_parameters, \"w\") as file:\n json.dump(landscape_parameters, file, indent=4)\n"},{"location":"python/api/#celldega.viz.Landscape","title":"Landscape","text":" Bases: AnyWidget A widget for interactive visualization of spatial omics data. This widget currently supports iST (Xenium and MERSCOPE) and sST (Visium HD data) Parameters: Name Type Description Default ini_x float The initial x-coordinate of the view. required ini_y float The initial y-coordinate of the view. required ini_zoom float The initial zoom level of the view. required token str The token traitlet. required base_url str The base URL for the widget. required dataset_name str The name of the dataset to visualize. This will show up in the user interface bar. required Attributes: Name Type Description component str The name of the component. technology str The technology used. base_url str The base URL for the widget. token str The token traitlet. ini_x float The initial x-coordinate of the view. ini_y float The initial y-coordinate of the view. ini_z float The initial z-coordinate of the view. ini_zoom float The initial zoom level of the view. dataset_name str The name of the dataset to visualize. update_trigger dict The dictionary to trigger updates. cell_clusters dict The dictionary containing cell cluster information. Returns: Name Type Description Landscape A widget for visualizing a 'landscape' view of spatial omics data. Source code in src/celldega/viz/widget.py class Landscape(anywidget.AnyWidget):\n \"\"\"\n A widget for interactive visualization of spatial omics data. This widget\n currently supports iST (Xenium and MERSCOPE) and sST (Visium HD data)\n\n Args:\n ini_x (float): The initial x-coordinate of the view.\n ini_y (float): The initial y-coordinate of the view.\n ini_zoom (float): The initial zoom level of the view.\n token (str): The token traitlet.\n base_url (str): The base URL for the widget.\n dataset_name (str, optional): The name of the dataset to visualize. 
This will show up in the user interface bar.\n\n Attributes:\n component (str): The name of the component.\n technology (str): The technology used.\n base_url (str): The base URL for the widget.\n token (str): The token traitlet.\n ini_x (float): The initial x-coordinate of the view.\n ini_y (float): The initial y-coordinate of the view.\n ini_z (float): The initial z-coordinate of the view.\n ini_zoom (float): The initial zoom level of the view.\n dataset_name (str): The name of the dataset to visualize.\n update_trigger (dict): The dictionary to trigger updates.\n cell_clusters (dict): The dictionary containing cell cluster information.\n\n Returns:\n Landscape: A widget for visualizing a 'landscape' view of spatial omics data.\n \"\"\"\n _esm = pathlib.Path(__file__).parent / \"../static\" / \"widget.js\"\n _css = pathlib.Path(__file__).parent / \"../static\" / \"widget.css\"\n component = traitlets.Unicode(\"Landscape\").tag(sync=True)\n\n technology = traitlets.Unicode(\"sst\").tag(sync=True)\n base_url = traitlets.Unicode(\"\").tag(sync=True)\n token = traitlets.Unicode(\"\").tag(sync=True)\n ini_x = traitlets.Float(1000).tag(sync=True)\n ini_y = traitlets.Float(1000).tag(sync=True)\n ini_z = traitlets.Float(0).tag(sync=True)\n ini_zoom = traitlets.Float(0).tag(sync=True)\n square_tile_size = traitlets.Float(1.4).tag(sync=True)\n dataset_name = traitlets.Unicode(\"\").tag(sync=True)\n region = traitlets.Dict({}).tag(sync=True)\n\n update_trigger = traitlets.Dict().tag(sync=True)\n cell_clusters = traitlets.Dict().tag(sync=True)\n\n width = traitlets.Int(0).tag(sync=True)\n height = traitlets.Int(800).tag(sync=True)\n\n def trigger_update(self, new_value):\n # This method updates the update_trigger traitlet with a new value\n # You can pass any information necessary for the update, or just a timestamp\n self.update_trigger = new_value\n\n def update_cell_clusters(self, new_clusters):\n # Convert the new_clusters to a JSON serializable format if necessary\n self.cell_clusters = new_clusters\n"},{"location":"python/api/#celldega.viz.Matrix","title":"Matrix","text":" Bases: AnyWidget A widget for interactive visualization of a hierarchically clustered matrix. Parameters: Name Type Description Default value int The value traitlet. required component str The component traitlet. required network dict The network traitlet. required click_info dict The click_info traitlet. required Attributes: Name Type Description component str The name of the component. network dict The network dictionary. click_info dict The click_info dictionary. Returns: Name Type Description Matrix A widget for visualizing a hierarchically clustered matrix. 
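As a minimal sketch of using the Matrix widget in a Jupyter notebook; the network dictionary below is a placeholder for the output of an upstream clustering step (e.g., a Clustergrammer-style workflow).

```python
import celldega as dega

# Placeholder network dictionary describing the hierarchically clustered matrix;
# in practice this is produced by an upstream clustering workflow
network = {}

mat_widget = dega.viz.Matrix(network=network)
mat_widget  # display the interactive matrix in a notebook cell

# click_info is a synced traitlet, so notebook code can observe user clicks
mat_widget.observe(lambda change: print(change["new"]), names="click_info")
```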
Source code in src/celldega/viz/widget.py class Matrix(anywidget.AnyWidget):\n \"\"\"\n A widget for interactive visualization of a hierarchically clustered matrix.\n\n Args:\n value (int): The value traitlet.\n component (str): The component traitlet.\n network (dict): The network traitlet.\n click_info (dict): The click_info traitlet.\n\n Attributes:\n component (str): The name of the component.\n network (dict): The network dictionary.\n click_info (dict): The click_info dictionary.\n\n Returns:\n Matrix: A widget for visualizing a hierarchically clustered matrix.\n \"\"\"\n _esm = pathlib.Path(__file__).parent / \"../static\" / \"widget.js\"\n _css = pathlib.Path(__file__).parent / \"../static\" / \"widget.css\"\n value = traitlets.Int(0).tag(sync=True)\n component = traitlets.Unicode(\"Matrix\").tag(sync=True)\n\n network = traitlets.Dict({}).tag(sync=True)\n click_info = traitlets.Dict({}).tag(sync=True)\n"},{"location":"python/pre/api/","title":"Pre Module API Reference","text":"Module for pre-processing to generate LandscapeFiles from ST data."},{"location":"python/pre/api/#celldega.pre.convert_long_id_to_short","title":"convert_long_id_to_short(df)","text":"Converts a column of long integer cell IDs in a DataFrame to a shorter, hash-based representation. Parameters: Name Type Description Default df DataFrame The DataFrame containing the EntityID. required Returns: pd.DataFrame: The original DataFrame with an additional column named cell_id containing the shortened cell IDs. The function applies a SHA-256 hash to each cell ID, encodes the hash using base64, and truncates it to create a shorter identifier that is added as a new column to the DataFrame. Source code in src/celldega/pre/__init__.py def convert_long_id_to_short(df):\n \"\"\"\n Converts a column of long integer cell IDs in a DataFrame to a shorter, hash-based representation.\n\n Args:\n df (pd.DataFrame): The DataFrame containing the EntityID.\n Returns:\n pd.DataFrame: The original DataFrame with an additional column named `cell_id`\n containing the shortened cell IDs.\n\n The function applies a SHA-256 hash to each cell ID, encodes the hash using base64, and truncates\n it to create a shorter identifier that is added as a new column to the DataFrame.\n \"\"\"\n # Function to hash and encode the cell ID\n def hash_and_shorten_id(cell_id):\n # Create a hash of the cell ID\n cell_id_bytes = str(cell_id).encode('utf-8')\n hash_object = hashlib.sha256(cell_id_bytes)\n hash_digest = hash_object.digest()\n\n # Encode the hash to a base64 string to mix letters and numbers, truncate to 9 characters\n short_id = base64.urlsafe_b64encode(hash_digest).decode('utf-8')[:9]\n return short_id\n\n # Apply the hash_and_shorten_id function to each cell ID in the specified column\n df['cell_id'] = df['EntityID'].apply(hash_and_shorten_id)\n\n return df\n"},{"location":"python/pre/api/#celldega.pre.convert_to_jpeg","title":"convert_to_jpeg(image_path, quality=80)","text":"Convert a TIFF image to a JPEG image with a quality of score"},{"location":"python/pre/api/#celldega.pre.convert_to_jpeg--parameters","title":"Parameters","text":"image_path : str Path to the image file quality : int (default=80) Quality score for the JPEG image"},{"location":"python/pre/api/#celldega.pre.convert_to_jpeg--returns","title":"Returns","text":"new_image_path : str Path to the JPEG image file Source code in src/celldega/pre/__init__.py def convert_to_jpeg(image_path, quality=80):\n \"\"\"\n Convert a TIFF image to a JPEG image with a quality of score\n\n 
Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=80)\n Quality score for the JPEG image\n\n Returns\n -------\n new_image_path : str\n Path to the JPEG image file\n\n \"\"\"\n\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a JPEG with a quality of 80\n new_image_path = image_path.replace(\".tif\", \".jpeg\")\n image.jpegsave(new_image_path, Q=quality)\n\n return new_image_path\n"},{"location":"python/pre/api/#celldega.pre.convert_to_png","title":"convert_to_png(image_path)","text":"Convert a TIFF image to a JPEG image with a quality of score"},{"location":"python/pre/api/#celldega.pre.convert_to_png--parameters","title":"Parameters","text":"image_path : str Path to the image file quality : int (default=80) Quality score for the JPEG image"},{"location":"python/pre/api/#celldega.pre.convert_to_png--returns","title":"Returns","text":"new_image_path : str Path to the JPEG image file Source code in src/celldega/pre/__init__.py def convert_to_png(image_path):\n \"\"\"\n Convert a TIFF image to a JPEG image with a quality of score\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=80)\n Quality score for the JPEG image\n\n Returns\n -------\n new_image_path : str\n Path to the JPEG image file\n\n \"\"\"\n\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a JPEG with a quality of 80\n new_image_path = image_path.replace(\".tif\", \".png\")\n image.pngsave(new_image_path)\n\n return new_image_path\n"},{"location":"python/pre/api/#celldega.pre.convert_to_webp","title":"convert_to_webp(image_path, quality=100)","text":"Convert a TIFF image to a WEBP image with a specified quality score."},{"location":"python/pre/api/#celldega.pre.convert_to_webp--parameters","title":"Parameters","text":"image_path : str Path to the image file quality : int (default=100) Quality score for the WEBP image (higher is better quality)"},{"location":"python/pre/api/#celldega.pre.convert_to_webp--returns","title":"Returns","text":"new_image_path : str Path to the WEBP image file Source code in src/celldega/pre/__init__.py def convert_to_webp(image_path, quality=100):\n \"\"\"\n Convert a TIFF image to a WEBP image with a specified quality score.\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=100)\n Quality score for the WEBP image (higher is better quality)\n\n Returns\n -------\n new_image_path : str\n Path to the WEBP image file\n \"\"\"\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a WEBP with specified quality\n new_image_path = image_path.replace(\".tif\", \".webp\")\n image.webpsave(new_image_path, Q=quality)\n\n return new_image_path\n"},{"location":"python/pre/api/#celldega.pre.get_max_zoom_level","title":"get_max_zoom_level(path_image_pyramid)","text":"Returns the maximum zoom level based on the highest-numbered directory in the specified path_image_pyramid. Parameters: Name Type Description Default path_image_pyramid str The path to the directory containing zoom level directories. required Returns: Name Type Description max_pyramid_zoom int The maximum zoom level. 
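For illustration, a small usage sketch of get_max_zoom_level with a hypothetical pyramid path:

```python
import celldega as dega

# Hypothetical path to an image pyramid with numbered zoom-level subdirectories
path_image_pyramid = "Xenium_outs/landscape_files/pyramid_images/dapi_files"

max_zoom = dega.pre.get_max_zoom_level(path_image_pyramid)
print(max_zoom)  # an integer such as 8, or None if no numbered directories exist
```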
Source code in src/celldega/pre/__init__.py def get_max_zoom_level(path_image_pyramid):\n \"\"\"\n Returns the maximum zoom level based on the highest-numbered directory\n in the specified path_image_pyramid.\n\n Parameters:\n path_image_pyramid (str): The path to the directory containing zoom level directories.\n\n Returns:\n max_pyramid_zoom (int): The maximum zoom level.\n \"\"\"\n # List all entries in the path_image_pyramid that are directories and can be converted to integers\n zoom_levels = [\n entry\n for entry in os.listdir(path_image_pyramid)\n if os.path.isdir(os.path.join(path_image_pyramid, entry)) and entry.isdigit()\n ]\n\n # Convert to integer and find the maximum value\n max_pyramid_zoom = max(map(int, zoom_levels)) if zoom_levels else None\n\n return max_pyramid_zoom\n"},{"location":"python/pre/api/#celldega.pre.make_cell_boundary_tiles","title":"make_cell_boundary_tiles(technology, path_cell_boundaries, path_meta_cell_micron, path_transformation_matrix, path_output, coarse_tile_factor=20, tile_size=250, tile_bounds=None, image_scale=1, max_workers=8)","text":"Processes cell boundary data and divides it into spatial tiles based on the provided technology. Reads cell boundary data, applies affine transformations, and divides the data into coarse and fine tiles. The resulting tiles are saved as Parquet files, each containing the geometries of cells in that tile."},{"location":"python/pre/api/#celldega.pre.make_cell_boundary_tiles--parameters","title":"Parameters","text":"technology : str The technology used to generate the cell boundary data, e.g., \"MERSCOPE\", \"Xenium\", or \"custom\". path_cell_boundaries : str Path to the file containing the cell boundaries (Parquet format). path_meta_cell_micron : str Path to the file containing cell metadata (CSV format). path_transformation_matrix : str Path to the file containing the transformation matrix (CSV format). path_output : str Directory path where the output files (Parquet files) for each tile will be saved. coarse_tile_factor : int, optional, default=20. scaling factor of each coarse-grain tile comparing to the fine tile size. tile_size : int, optional, default=500 Size of each fine-grain tile in microns. tile_bounds : dict, optional Dictionary containing the minimum and maximum bounds for x and y coordinates. image_scale : float, optional, default=1 Scale factor to apply to the geometry data. 
max_workers : int, optional, default=8 Maximum number of parallel workers for processing tiles."},{"location":"python/pre/api/#celldega.pre.make_cell_boundary_tiles--returns","title":"Returns","text":"None Source code in src/celldega/pre/boundary_tile.py def make_cell_boundary_tiles(\n technology,\n path_cell_boundaries,\n path_meta_cell_micron,\n path_transformation_matrix,\n path_output,\n coarse_tile_factor=20,\n tile_size=250,\n tile_bounds=None,\n image_scale=1,\n max_workers=8\n):\n\n\n \"\"\"\n Processes cell boundary data and divides it into spatial tiles based on the provided technology.\n Reads cell boundary data, applies affine transformations, and divides the data into coarse and fine tiles.\n The resulting tiles are saved as Parquet files, each containing the geometries of cells in that tile.\n\n Parameters\n ----------\n technology : str\n The technology used to generate the cell boundary data, e.g., \"MERSCOPE\", \"Xenium\", or \"custom\".\n path_cell_boundaries : str\n Path to the file containing the cell boundaries (Parquet format).\n path_meta_cell_micron : str\n Path to the file containing cell metadata (CSV format).\n path_transformation_matrix : str\n Path to the file containing the transformation matrix (CSV format).\n path_output : str\n Directory path where the output files (Parquet files) for each tile will be saved.\n coarse_tile_factor : int, optional, default=20.\n scaling factor of each coarse-grain tile comparing to the fine tile size.\n tile_size : int, optional, default=500\n Size of each fine-grain tile in microns.\n tile_bounds : dict, optional\n Dictionary containing the minimum and maximum bounds for x and y coordinates.\n image_scale : float, optional, default=1\n Scale factor to apply to the geometry data.\n max_workers : int, optional, default=8\n Maximum number of parallel workers for processing tiles.\n\n Returns\n -------\n None\n \"\"\"\n\n def numpy_affine_transform(coords, matrix):\n \"\"\"Apply affine transformation to numpy coordinates.\"\"\"\n # Homogeneous coordinates for affine transformation\n coords = np.hstack([coords, np.ones((coords.shape[0], 1))])\n transformed_coords = coords @ matrix.T\n return transformed_coords[:, :2] # Drop the homogeneous coordinate\n\n def batch_transform_geometries(geometries, transformation_matrix, scale):\n \"\"\"\n Batch transform geometries using numpy for optimized performance.\n \"\"\"\n # Extract affine transformation parameters into a 3x3 matrix for numpy\n affine_matrix = np.array([\n [transformation_matrix[0, 0], transformation_matrix[0, 1], transformation_matrix[0, 2]],\n [transformation_matrix[1, 0], transformation_matrix[1, 1], transformation_matrix[1, 2]],\n [0, 0, 1]\n ])\n\n transformed_geometries = []\n\n for polygon in geometries:\n # Extract coordinates and transform them\n if isinstance(polygon, MultiPolygon):\n polygon = next(polygon.geoms) # Use the first geometry\n\n # Transform the exterior of the polygon\n exterior_coords = np.array(polygon.exterior.coords)\n\n # Apply the affine transformation and scale\n transformed_coords = numpy_affine_transform(exterior_coords, affine_matrix) / scale\n\n # Append the result to the transformed_geometries list\n transformed_geometries.append([transformed_coords.tolist()])\n\n return transformed_geometries\n\n\n def filter_and_save_fine_boundary(coarse_tile, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_output):\n cell_ids = coarse_tile.index.values\n\n tile_filter = (\n (coarse_tile[\"center_x\"] >= 
fine_tile_x_min) & (coarse_tile[\"center_x\"] < fine_tile_x_max) &\n (coarse_tile[\"center_y\"] >= fine_tile_y_min) & (coarse_tile[\"center_y\"] < fine_tile_y_max)\n )\n filtered_indices = np.where(tile_filter)[0]\n\n keep_cells = cell_ids[filtered_indices]\n fine_tile_cells = coarse_tile.loc[keep_cells, [\"GEOMETRY\"]]\n fine_tile_cells = fine_tile_cells.assign(name=fine_tile_cells.index)\n\n if not fine_tile_cells.empty:\n filename = f\"{path_output}/cell_tile_{fine_i}_{fine_j}.parquet\"\n fine_tile_cells.to_parquet(filename)\n\n def process_fine_boundaries(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_output, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y):\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = []\n for fine_i in range(n_fine_tiles_x):\n fine_tile_x_min = x_min + fine_i * tile_size\n fine_tile_x_max = fine_tile_x_min + tile_size\n\n if not (fine_tile_x_min >= coarse_tile_x_min and fine_tile_x_max <= coarse_tile_x_max):\n continue\n\n for fine_j in range(n_fine_tiles_y):\n fine_tile_y_min = y_min + fine_j * tile_size\n fine_tile_y_max = fine_tile_y_min + tile_size\n\n if not (fine_tile_y_min >= coarse_tile_y_min and fine_tile_y_max <= coarse_tile_y_max):\n continue\n\n futures.append(executor.submit(\n filter_and_save_fine_boundary, coarse_tile, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_output\n ))\n\n for future in futures:\n future.result()\n\n tile_size_x = tile_size\n tile_size_y = tile_size\n\n transformation_matrix = pd.read_csv(path_transformation_matrix, header=None, sep=\" \").values\n\n # Load cell boundary data based on the technology\n if technology == \"MERSCOPE\":\n df_meta = pd.read_parquet(f\"{path_output.replace('cell_segmentation','cell_metadata.parquet')}\")\n entity_to_cell_id_dict = pd.Series(df_meta.index.values, index=df_meta.EntityID).to_dict()\n cells_orig = gpd.read_parquet(path_cell_boundaries)\n cells_orig['cell_id'] = cells_orig['EntityID'].map(entity_to_cell_id_dict)\n cells_orig = cells_orig[cells_orig[\"ZIndex\"] == 1]\n\n # Correct cell_id issues with meta_cell\n meta_cell = pd.read_csv(path_meta_cell_micron)\n meta_cell['cell_id'] = meta_cell['EntityID'].map(entity_to_cell_id_dict)\n cells_orig.index = meta_cell[meta_cell[\"cell_id\"].isin(cells_orig['cell_id'])].index\n\n # Correct 'MultiPolygon' to 'Polygon'\n cells_orig[\"geometry\"] = cells_orig[\"Geometry\"].apply(\n lambda x: list(x.geoms)[0] if isinstance(x, MultiPolygon) else x\n )\n\n cells_orig.set_index('cell_id', inplace=True)\n\n elif technology == \"Xenium\":\n xenium_cells = pd.read_parquet(path_cell_boundaries)\n grouped = xenium_cells.groupby(\"cell_id\")[[\"vertex_x\", \"vertex_y\"]].agg(lambda x: x.tolist())\n grouped[\"geometry\"] = grouped.apply(lambda row: Polygon(zip(row[\"vertex_x\"], row[\"vertex_y\"])), axis=1)\n cells_orig = gpd.GeoDataFrame(grouped, geometry=\"geometry\")[[\"geometry\"]]\n\n elif technology == \"custom\":\n cells_orig = gpd.read_parquet(path_cell_boundaries)\n\n # Transform geometries\n cells_orig[\"GEOMETRY\"] = batch_transform_geometries(cells_orig[\"geometry\"], transformation_matrix, image_scale)\n\n # Convert transformed geometries to polygons and calculate centroids\n cells_orig[\"polygon\"] = cells_orig[\"GEOMETRY\"].apply(lambda x: Polygon(x[0]))\n gdf_cells = gpd.GeoDataFrame(geometry=cells_orig[\"polygon\"])\n gdf_cells[\"center_x\"] = gdf_cells.geometry.centroid.x\n 
gdf_cells[\"center_y\"] = gdf_cells.geometry.centroid.y\n gdf_cells[\"GEOMETRY\"] = cells_orig[\"GEOMETRY\"]\n\n # Ensure the output directory exists\n if not os.path.exists(path_output):\n os.makedirs(path_output)\n\n # Calculate tile bounds and fine/coarse tiles\n x_min, x_max = tile_bounds[\"x_min\"], tile_bounds[\"x_max\"]\n y_min, y_max = tile_bounds[\"y_min\"], tile_bounds[\"y_max\"]\n n_fine_tiles_x = int(np.ceil((x_max - x_min) / tile_size))\n n_fine_tiles_y = int(np.ceil((y_max - y_min) / tile_size))\n n_coarse_tiles_x = int(np.ceil((x_max - x_min) / (coarse_tile_factor * tile_size)))\n n_coarse_tiles_y = int(np.ceil((y_max - y_min) / (coarse_tile_factor * tile_size)))\n\n # Process coarse tiles in parallel\n for i in tqdm(range(n_coarse_tiles_x), desc=\"Processing coarse tiles\"):\n coarse_tile_x_min = x_min + i * (coarse_tile_factor * tile_size)\n coarse_tile_x_max = coarse_tile_x_min + (coarse_tile_factor * tile_size)\n\n for j in range(n_coarse_tiles_y):\n coarse_tile_y_min = y_min + j * (coarse_tile_factor * tile_size)\n coarse_tile_y_max = coarse_tile_y_min + (coarse_tile_factor * tile_size)\n\n coarse_tile = gdf_cells[\n (gdf_cells[\"center_x\"] >= coarse_tile_x_min) & (gdf_cells[\"center_x\"] < coarse_tile_x_max) &\n (gdf_cells[\"center_y\"] >= coarse_tile_y_min) & (gdf_cells[\"center_y\"] < coarse_tile_y_max)\n ]\n if not coarse_tile.empty:\n process_fine_boundaries(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_output, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y)\n"},{"location":"python/pre/api/#celldega.pre.make_deepzoom_pyramid","title":"make_deepzoom_pyramid(image_path, output_path, pyramid_name, tile_size=512, overlap=0, suffix='.jpeg')","text":"Create a DeepZoom image pyramid from a JPEG image"},{"location":"python/pre/api/#celldega.pre.make_deepzoom_pyramid--parameters","title":"Parameters","text":"image_path : str Path to the JPEG image file tile_size : int (default=512) Tile size for the DeepZoom pyramid overlap : int (default=0) Overlap size for the DeepZoom pyramid suffix : str (default='jpeg') Suffix for the DeepZoom pyramid tiles"},{"location":"python/pre/api/#celldega.pre.make_deepzoom_pyramid--returns","title":"Returns","text":"None Source code in src/celldega/pre/__init__.py def make_deepzoom_pyramid(\n image_path, output_path, pyramid_name, tile_size=512, overlap=0, suffix=\".jpeg\"\n):\n \"\"\"\n Create a DeepZoom image pyramid from a JPEG image\n\n Parameters\n ----------\n image_path : str\n Path to the JPEG image file\n tile_size : int (default=512)\n Tile size for the DeepZoom pyramid\n overlap : int (default=0)\n Overlap size for the DeepZoom pyramid\n suffix : str (default='jpeg')\n Suffix for the DeepZoom pyramid tiles\n\n Returns\n -------\n None\n\n \"\"\"\n\n # Define the output path\n output_path = Path(output_path)\n\n # Load the JPEG image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # check if the output path exists and create it if it does not\n output_path.mkdir(parents=True, exist_ok=True)\n\n # append the pyramid name to the output path\n output_path = output_path / pyramid_name\n\n # Save the image as a DeepZoom image pyramid\n image.dzsave(output_path, tile_size=tile_size, overlap=overlap, suffix=suffix)\n"},{"location":"python/pre/api/#celldega.pre.make_meta_cell_image_coord","title":"make_meta_cell_image_coord(technology, path_transformation_matrix, path_meta_cell_micron, path_meta_cell_image, image_scale)","text":"Apply an affine 
transformation to the cell coordinates in microns and save the transformed coordinates in pixels"},{"location":"python/pre/api/#celldega.pre.make_meta_cell_image_coord--parameters","title":"Parameters","text":"technology : str The technology used to generate the data, Xenium and MERSCOPE are supported. path_transformation_matrix : str Path to the transformation matrix file path_meta_cell_micron : str Path to the meta cell file with coordinates in microns path_meta_cell_image : str Path to save the meta cell file with coordinates in pixels"},{"location":"python/pre/api/#celldega.pre.make_meta_cell_image_coord--returns","title":"Returns","text":"None"},{"location":"python/pre/api/#celldega.pre.make_meta_cell_image_coord--examples","title":"Examples","text":"make_meta_cell_image_coord( ... technology='Xenium', ... path_transformation_matrix='data/transformation_matrix.txt', ... path_meta_cell_micron='data/meta_cell_micron.csv', ... path_meta_cell_image='data/meta_cell_image.parquet' ... ) Source code in src/celldega/pre/__init__.py def make_meta_cell_image_coord(\n technology,\n path_transformation_matrix,\n path_meta_cell_micron,\n path_meta_cell_image,\n image_scale\n):\n \"\"\"\n Apply an affine transformation to the cell coordinates in microns and save\n the transformed coordinates in pixels\n\n Parameters\n ----------\n technology : str\n The technology used to generate the data, Xenium and MERSCOPE are supported.\n path_transformation_matrix : str\n Path to the transformation matrix file\n path_meta_cell_micron : str\n Path to the meta cell file with coordinates in microns\n path_meta_cell_image : str\n Path to save the meta cell file with coordinates in pixels\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> make_meta_cell_image_coord(\n ... technology='Xenium',\n ... path_transformation_matrix='data/transformation_matrix.txt',\n ... path_meta_cell_micron='data/meta_cell_micron.csv',\n ... path_meta_cell_image='data/meta_cell_image.parquet'\n ... 
)\n\n \"\"\"\n\n transformation_matrix = pd.read_csv(\n path_transformation_matrix, header=None, sep=\" \"\n ).values\n\n if technology == \"MERSCOPE\":\n meta_cell = pd.read_csv(path_meta_cell_micron, usecols=[\"EntityID\", \"center_x\", \"center_y\"])\n meta_cell = convert_long_id_to_short(meta_cell)\n meta_cell[\"name\"] = meta_cell[\"cell_id\"]\n meta_cell = meta_cell.set_index('cell_id')\n elif technology == \"Xenium\":\n usecols = [\"cell_id\", \"x_centroid\", \"y_centroid\"]\n meta_cell = pd.read_csv(path_meta_cell_micron, index_col=0, usecols=usecols)\n meta_cell.columns = [\"center_x\", \"center_y\"]\n meta_cell[\"name\"] = pd.Series(meta_cell.index, index=meta_cell.index)\n\n # Adding a ones column to accommodate for affine transformation\n meta_cell[\"ones\"] = 1\n\n # Preparing the data for matrix multiplication\n points = meta_cell[[\"center_x\", \"center_y\", \"ones\"]].values\n\n # Applying the transformation matrix\n transformed_points = np.dot(transformation_matrix, points.T).T\n\n # Updating the DataFrame with transformed coordinates\n meta_cell[\"center_x\"] = transformed_points[:, 0]\n meta_cell[\"center_y\"] = transformed_points[:, 1]\n\n # Dropping the ones column as it's no longer needed\n meta_cell.drop(columns=[\"ones\"], inplace=True)\n\n meta_cell[\"center_x\"] = meta_cell[\"center_x\"] / image_scale\n meta_cell[\"center_y\"] = meta_cell[\"center_y\"] / image_scale\n\n meta_cell[\"geometry\"] = meta_cell.apply(\n lambda row: [row[\"center_x\"], row[\"center_y\"]], axis=1\n )\n\n if technology == \"MERSCOPE\":\n meta_cell = meta_cell[[\"name\", \"geometry\", \"EntityID\"]]\n else:\n meta_cell = meta_cell[[\"name\", \"geometry\"]]\n\n\n meta_cell.to_parquet(path_meta_cell_image)\n"},{"location":"python/pre/api/#celldega.pre.make_meta_gene","title":"make_meta_gene(technology, path_cbg, path_output)","text":"Create a DataFrame with genes and their assigned colors"},{"location":"python/pre/api/#celldega.pre.make_meta_gene--parameters","title":"Parameters","text":"technology : str The technology used to generate the data, Xenium and MERSCOPE are supported. path_cbg : str Path to the cell-by-gene matrix data (the data format can vary based on technology) path_output : str Path to save the meta gene file"},{"location":"python/pre/api/#celldega.pre.make_meta_gene--returns","title":"Returns","text":"None"},{"location":"python/pre/api/#celldega.pre.make_meta_gene--examples","title":"Examples","text":"make_meta_gene( ... technology='Xenium', ... path_cbg='data/', ... path_output='data/meta_gene.parquet' ... ) Source code in src/celldega/pre/__init__.py def make_meta_gene(technology, path_cbg, path_output):\n \"\"\"\n Create a DataFrame with genes and their assigned colors\n\n Parameters\n ----------\n technology : str\n The technology used to generate the data, Xenium and MERSCOPE are supported.\n path_cbg : str\n Path to the cell-by-gene matrix data (the data format can vary based on technology)\n path_output : str\n Path to save the meta gene file\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> make_meta_gene(\n ... technology='Xenium',\n ... path_cbg='data/',\n ... path_output='data/meta_gene.parquet'\n ... 
)\n \"\"\"\n\n if technology == \"MERSCOPE\":\n cbg = pd.read_csv(path_cbg, index_col=0)\n genes = cbg.columns.tolist()\n elif technology == \"Xenium\":\n # genes = pd.read_csv(path_cbg + 'features.tsv.gz', sep='\\t', header=None)[1].values.tolist()\n cbg = read_cbg_mtx(path_cbg)\n genes = cbg.columns.tolist()\n\n # Get all categorical color palettes from Matplotlib and flatten them into a single list of colors\n palettes = [plt.get_cmap(name).colors for name in plt.colormaps() if \"tab\" in name]\n flat_colors = [color for palette in palettes for color in palette]\n\n # Convert RGB tuples to hex codes\n flat_colors_hex = [to_hex(color) for color in flat_colors]\n\n # Use modular arithmetic to assign a color to each gene, white for genes with \"Blank\"\n colors = [\n flat_colors_hex[i % len(flat_colors_hex)] if \"Blank\" not in gene else \"#FFFFFF\"\n for i, gene in enumerate(genes)\n ]\n\n # Create a DataFrame with genes and their assigned colors\n ser_color = pd.Series(colors, index=genes)\n\n # calculate gene expression metadata\n meta_gene = calc_meta_gene_data(cbg)\n meta_gene['color'] = ser_color\n\n # Identify sparse columns\n sparse_cols = [col for col in meta_gene.columns if pd.api.types.is_sparse(meta_gene[col])]\n\n # Convert sparse columns to dense\n for col in sparse_cols:\n meta_gene[col] = meta_gene[col].sparse.to_dense()\n\n meta_gene.to_parquet(path_output)\n"},{"location":"python/pre/api/#celldega.pre.make_trx_tiles","title":"make_trx_tiles(technology, path_trx, path_transformation_matrix, path_trx_tiles, coarse_tile_factor=10, tile_size=250, chunk_size=1000000, verbose=False, image_scale=1, max_workers=8)","text":"Processes transcript data by dividing it into coarse-grain and fine-grain tiles, applying transformations, and saving the results in a parallelized manner."},{"location":"python/pre/api/#celldega.pre.make_trx_tiles--parameters","title":"Parameters","text":"technology : str The technology used for generating the transcript data (e.g., \"MERSCOPE\" or \"Xenium\"). path_trx : str Path to the file containing the transcript data. path_transformation_matrix : str Path to the file containing the transformation matrix (CSV file). path_trx_tiles : str Directory path where the output files (Parquet files) for each tile will be saved. coarse_tile_factor : int, optional Scaling factor of each coarse-grain tile comparing to the fine tile size. tile_size : int, optional Size of each fine-grain tile in microns (default is 250). chunk_size : int, optional Number of rows to process per chunk for memory efficiency (default is 1000000). verbose : bool, optional Flag to enable verbose output (default is False). image_scale : float, optional Scale factor to apply to the transcript coordinates (default is 0.5). max_workers : int, optional Maximum number of parallel workers for processing tiles (default is 8)."},{"location":"python/pre/api/#celldega.pre.make_trx_tiles--returns","title":"Returns","text":"dict A dictionary containing the bounds of the processed data in both x and y directions. 
Source code in src/celldega/pre/trx_tile.py def make_trx_tiles(\n technology,\n path_trx,\n path_transformation_matrix,\n path_trx_tiles,\n coarse_tile_factor=10,\n tile_size=250,\n chunk_size=1000000,\n verbose=False,\n image_scale=1,\n max_workers=8\n):\n \"\"\"\n Processes transcript data by dividing it into coarse-grain and fine-grain tiles,\n applying transformations, and saving the results in a parallelized manner.\n\n Parameters\n ----------\n technology : str\n The technology used for generating the transcript data (e.g., \"MERSCOPE\" or \"Xenium\").\n path_trx : str\n Path to the file containing the transcript data.\n path_transformation_matrix : str\n Path to the file containing the transformation matrix (CSV file).\n path_trx_tiles : str\n Directory path where the output files (Parquet files) for each tile will be saved.\n coarse_tile_factor : int, optional\n Scaling factor of each coarse-grain tile comparing to the fine tile size.\n tile_size : int, optional\n Size of each fine-grain tile in microns (default is 250).\n chunk_size : int, optional\n Number of rows to process per chunk for memory efficiency (default is 1000000).\n verbose : bool, optional\n Flag to enable verbose output (default is False).\n image_scale : float, optional\n Scale factor to apply to the transcript coordinates (default is 0.5).\n max_workers : int, optional\n Maximum number of parallel workers for processing tiles (default is 8).\n\n Returns\n -------\n dict\n A dictionary containing the bounds of the processed data in both x and y directions.\n \"\"\"\n\n def process_coarse_tile(trx, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers):\n # Filter the entire dataset for the current coarse tile\n coarse_tile = trx.filter(\n (pl.col(\"transformed_x\") >= coarse_tile_x_min) & (pl.col(\"transformed_x\") < coarse_tile_x_max) &\n (pl.col(\"transformed_y\") >= coarse_tile_y_min) & (pl.col(\"transformed_y\") < coarse_tile_y_max)\n )\n\n if not coarse_tile.is_empty():\n # Now process fine tiles using global fine tile indices\n process_fine_tiles(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers) \n\n\n def process_fine_tiles(coarse_tile, coarse_i, coarse_j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers=8):\n\n # Use ThreadPoolExecutor for parallel processing of fine-grain tiles within the coarse tile\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = []\n\n # Iterate over fine-grain tiles within the global bounds\n for fine_i in range(n_fine_tiles_x):\n fine_tile_x_min = x_min + fine_i * tile_size\n fine_tile_x_max = fine_tile_x_min + tile_size\n\n # Process only if the fine tile falls within the current coarse tile's bounds\n if not (fine_tile_x_min >= coarse_tile_x_min and fine_tile_x_max <= coarse_tile_x_max):\n continue\n\n for fine_j in range(n_fine_tiles_y):\n fine_tile_y_min = y_min + fine_j * tile_size\n fine_tile_y_max = fine_tile_y_min + tile_size\n\n # Process only if the fine tile falls within the current coarse tile's bounds\n if not (fine_tile_y_min >= coarse_tile_y_min and fine_tile_y_max <= coarse_tile_y_max):\n continue\n\n # Submit the task for each fine tile to process in parallel\n 
futures.append(executor.submit(\n filter_and_save_fine_tile, coarse_tile, coarse_i, coarse_j, fine_i, fine_j, \n fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_trx_tiles\n ))\n\n # Wait for all futures to complete\n for future in concurrent.futures.as_completed(futures):\n future.result() # Raise exceptions if any occurred during execution\n\n\n def filter_and_save_fine_tile(coarse_tile, coarse_i, coarse_j, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_trx_tiles):\n\n # Filter the coarse tile for the current fine tile's boundaries\n fine_tile_trx = coarse_tile.filter(\n (pl.col(\"transformed_x\") >= fine_tile_x_min) & (pl.col(\"transformed_x\") < fine_tile_x_max) &\n (pl.col(\"transformed_y\") >= fine_tile_y_min) & (pl.col(\"transformed_y\") < fine_tile_y_max)\n )\n\n if not fine_tile_trx.is_empty():\n # Add geometry column as a list of [x, y] pairs\n fine_tile_trx = fine_tile_trx.with_columns(\n pl.concat_list([pl.col(\"transformed_x\"), pl.col(\"transformed_y\")]).alias(\"geometry\")\n ).drop(['transformed_x', 'transformed_y'])\n\n # Define the filename based on fine tile coordinates\n filename = f\"{path_trx_tiles}/transcripts_tile_{fine_i}_{fine_j}.parquet\"\n\n # Save the filtered DataFrame to a Parquet file\n fine_tile_trx.to_pandas().to_parquet(filename)\n\n\n # Load transformation matrix\n transformation_matrix = np.loadtxt(path_transformation_matrix)\n\n # Load the transcript data based on the technology using Polars\n if technology == \"MERSCOPE\":\n trx_ini = pl.read_csv(path_trx, columns=[\"gene\", \"global_x\", \"global_y\"])\n trx_ini = trx_ini.with_columns([\n pl.col(\"global_x\").alias(\"x\"),\n pl.col(\"global_y\").alias(\"y\"),\n pl.col(\"gene\").alias(\"name\")\n ]).select([\"name\", \"x\", \"y\"])\n\n elif technology == \"Xenium\":\n trx_ini = pl.read_parquet(path_trx).select([\n pl.col(\"feature_name\").alias(\"name\"),\n pl.col(\"x_location\").alias(\"x\"),\n pl.col(\"y_location\").alias(\"y\")\n ])\n\n # Process the data in chunks and apply transformations\n all_chunks = []\n\n for start_row in tqdm(range(0, trx_ini.height, chunk_size), desc=\"Processing chunks\"):\n chunk = trx_ini.slice(start_row, chunk_size)\n\n # Apply transformation matrix to the coordinates\n points = np.hstack([chunk.select([\"x\", \"y\"]).to_numpy(), np.ones((chunk.height, 1))])\n transformed_points = np.dot(points, transformation_matrix.T)[:, :2]\n\n # Create new transformed columns and drop original x, y columns\n transformed_chunk = chunk.with_columns([\n (pl.Series(transformed_points[:, 0]) * image_scale).round(2).alias(\"transformed_x\"),\n (pl.Series(transformed_points[:, 1]) * image_scale).round(2).alias(\"transformed_y\")\n ]).drop([\"x\", \"y\"])\n all_chunks.append(transformed_chunk)\n\n # Concatenate all chunks after processing\n trx = pl.concat(all_chunks)\n\n # Ensure the output directory exists\n if not os.path.exists(path_trx_tiles):\n os.makedirs(path_trx_tiles)\n\n # Get min and max x, y values\n x_min, x_max = trx.select([\n pl.col(\"transformed_x\").min().alias(\"x_min\"),\n pl.col(\"transformed_x\").max().alias(\"x_max\")\n ]).row(0)\n\n y_min, y_max = trx.select([\n pl.col(\"transformed_y\").min().alias(\"y_min\"),\n pl.col(\"transformed_y\").max().alias(\"y_max\")\n ]).row(0)\n\n # Calculate the number of fine-grain tiles globally\n n_fine_tiles_x = int(np.ceil((x_max - x_min) / tile_size))\n n_fine_tiles_y = int(np.ceil((y_max - y_min) / tile_size))\n\n # Calculate the number of coarse-grain tiles\n 
n_coarse_tiles_x = int(np.ceil((x_max - x_min) / (coarse_tile_factor * tile_size)))\n n_coarse_tiles_y = int(np.ceil((y_max - y_min) / (coarse_tile_factor * tile_size)))\n\n # Use ThreadPoolExecutor for parallel processing of coarse-grain tiles\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = []\n for i in range(n_coarse_tiles_x):\n coarse_tile_x_min = x_min + i * (coarse_tile_factor * tile_size)\n coarse_tile_x_max = coarse_tile_x_min + (coarse_tile_factor * tile_size)\n\n for j in range(n_coarse_tiles_y):\n coarse_tile_y_min = y_min + j * (coarse_tile_factor * tile_size)\n coarse_tile_y_max = coarse_tile_y_min + (coarse_tile_factor * tile_size)\n\n # Submit each coarse tile for parallel processing\n futures.append(executor.submit(\n process_coarse_tile, trx, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers\n ))\n\n # Wait for all coarse tiles to complete\n for future in tqdm(concurrent.futures.as_completed(futures), desc=\"Processing coarse tiles\", unit=\"tile\"):\n future.result() # Raise exceptions if any occurred during execution\n\n # Return the tile bounds\n tile_bounds = {\n \"x_min\": x_min,\n \"x_max\": x_max,\n \"y_min\": y_min,\n \"y_max\": y_max,\n }\n\n return tile_bounds\n"},{"location":"python/pre/api/#celldega.pre.reduce_image_size","title":"reduce_image_size(image_path, scale_image=0.5, path_landscape_files='')","text":""},{"location":"python/pre/api/#celldega.pre.reduce_image_size--parameters","title":"Parameters","text":"image_path : str Path to the image file scale_image : float (default=0.5) Scale factor for the image resize"},{"location":"python/pre/api/#celldega.pre.reduce_image_size--returns","title":"Returns","text":"new_image_path : str Path to the resized image file Source code in src/celldega/pre/__init__.py def reduce_image_size(image_path, scale_image=0.5, path_landscape_files=\"\"):\n \"\"\"\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n scale_image : float (default=0.5)\n Scale factor for the image resize\n\n Returns\n -------\n new_image_path : str\n Path to the resized image file\n \"\"\"\n\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n resized_image = image.resize(scale_image)\n\n new_image_name = image_path.split(\"/\")[-1].replace(\".tif\", \"_downsize.tif\")\n new_image_path = f\"{path_landscape_files}/{new_image_name}\"\n resized_image.write_to_file(new_image_path)\n\n return new_image_path\n"},{"location":"python/pre/api/#celldega.pre.save_landscape_parameters","title":"save_landscape_parameters(technology, path_landscape_files, image_name='dapi_files', tile_size=1000, image_info={}, image_format='.webp')","text":"Save the landscape parameters to a JSON file. 
Source code in src/celldega/pre/__init__.py def save_landscape_parameters(\n technology, path_landscape_files, image_name=\"dapi_files\", tile_size=1000, image_info={}, image_format='.webp'\n):\n \"\"\"\n Save the landscape parameters to a JSON file.\n \"\"\"\n\n path_image_pyramid = f\"{path_landscape_files}/pyramid_images/{image_name}\"\n\n print(path_image_pyramid)\n\n max_pyramid_zoom = get_max_zoom_level(path_image_pyramid)\n\n landscape_parameters = {\n \"technology\": technology,\n \"max_pyramid_zoom\": max_pyramid_zoom,\n \"tile_size\": tile_size,\n \"image_info\": image_info,\n \"image_format\": image_format\n }\n\n path_landscape_parameters = f\"{path_landscape_files}/landscape_parameters.json\"\n\n with open(path_landscape_parameters, \"w\") as file:\n json.dump(landscape_parameters, file, indent=4)\n"},{"location":"python/viz/api/","title":"Viz Module API Reference","text":""},{"location":"python/viz/api/#widget-classes","title":"Widget Classes","text":""},{"location":"python/viz/api/#celldega.viz.widget.Landscape","title":"Landscape","text":" Bases: AnyWidget A widget for interactive visualization of spatial omics data. This widget currently supports iST (Xenium and MERSCOPE) and sST (Visium HD data) Parameters: Name Type Description Default ini_x float The initial x-coordinate of the view. required ini_y float The initial y-coordinate of the view. required ini_zoom float The initial zoom level of the view. required token str The token traitlet. required base_url str The base URL for the widget. required dataset_name str The name of the dataset to visualize. This will show up in the user interface bar. required Attributes: Name Type Description component str The name of the component. technology str The technology used. base_url str The base URL for the widget. token str The token traitlet. ini_x float The initial x-coordinate of the view. ini_y float The initial y-coordinate of the view. ini_z float The initial z-coordinate of the view. ini_zoom float The initial zoom level of the view. dataset_name str The name of the dataset to visualize. update_trigger dict The dictionary to trigger updates. cell_clusters dict The dictionary containing cell cluster information. Returns: Name Type Description Landscape A widget for visualizing a 'landscape' view of spatial omics data. Source code in src/celldega/viz/widget.py class Landscape(anywidget.AnyWidget):\n \"\"\"\n A widget for interactive visualization of spatial omics data. This widget\n currently supports iST (Xenium and MERSCOPE) and sST (Visium HD data)\n\n Args:\n ini_x (float): The initial x-coordinate of the view.\n ini_y (float): The initial y-coordinate of the view.\n ini_zoom (float): The initial zoom level of the view.\n token (str): The token traitlet.\n base_url (str): The base URL for the widget.\n dataset_name (str, optional): The name of the dataset to visualize. 
This will show up in the user interface bar.\n\n Attributes:\n component (str): The name of the component.\n technology (str): The technology used.\n base_url (str): The base URL for the widget.\n token (str): The token traitlet.\n ini_x (float): The initial x-coordinate of the view.\n ini_y (float): The initial y-coordinate of the view.\n ini_z (float): The initial z-coordinate of the view.\n ini_zoom (float): The initial zoom level of the view.\n dataset_name (str): The name of the dataset to visualize.\n update_trigger (dict): The dictionary to trigger updates.\n cell_clusters (dict): The dictionary containing cell cluster information.\n\n Returns:\n Landscape: A widget for visualizing a 'landscape' view of spatial omics data.\n \"\"\"\n _esm = pathlib.Path(__file__).parent / \"../static\" / \"widget.js\"\n _css = pathlib.Path(__file__).parent / \"../static\" / \"widget.css\"\n component = traitlets.Unicode(\"Landscape\").tag(sync=True)\n\n technology = traitlets.Unicode(\"sst\").tag(sync=True)\n base_url = traitlets.Unicode(\"\").tag(sync=True)\n token = traitlets.Unicode(\"\").tag(sync=True)\n ini_x = traitlets.Float(1000).tag(sync=True)\n ini_y = traitlets.Float(1000).tag(sync=True)\n ini_z = traitlets.Float(0).tag(sync=True)\n ini_zoom = traitlets.Float(0).tag(sync=True)\n square_tile_size = traitlets.Float(1.4).tag(sync=True)\n dataset_name = traitlets.Unicode(\"\").tag(sync=True)\n region = traitlets.Dict({}).tag(sync=True)\n\n update_trigger = traitlets.Dict().tag(sync=True)\n cell_clusters = traitlets.Dict().tag(sync=True)\n\n width = traitlets.Int(0).tag(sync=True)\n height = traitlets.Int(800).tag(sync=True)\n\n def trigger_update(self, new_value):\n # This method updates the update_trigger traitlet with a new value\n # You can pass any information necessary for the update, or just a timestamp\n self.update_trigger = new_value\n\n def update_cell_clusters(self, new_clusters):\n # Convert the new_clusters to a JSON serializable format if necessary\n self.cell_clusters = new_clusters\n"},{"location":"python/viz/api/#celldega.viz.widget.Matrix","title":"Matrix","text":" Bases: AnyWidget A widget for interactive visualization of a hierarchically clustered matrix. Parameters: Name Type Description Default value int The value traitlet. required component str The component traitlet. required network dict The network traitlet. required click_info dict The click_info traitlet. required Attributes: Name Type Description component str The name of the component. network dict The network dictionary. click_info dict The click_info dictionary. Returns: Name Type Description Matrix A widget for visualizing a hierarchically clustered matrix. 
Source code in src/celldega/viz/widget.py class Matrix(anywidget.AnyWidget):\n \"\"\"\n A widget for interactive visualization of a hierarchically clustered matrix.\n\n Args:\n value (int): The value traitlet.\n component (str): The component traitlet.\n network (dict): The network traitlet.\n click_info (dict): The click_info traitlet.\n\n Attributes:\n component (str): The name of the component.\n network (dict): The network dictionary.\n click_info (dict): The click_info dictionary.\n\n Returns:\n Matrix: A widget for visualizing a hierarchically clustered matrix.\n \"\"\"\n _esm = pathlib.Path(__file__).parent / \"../static\" / \"widget.js\"\n _css = pathlib.Path(__file__).parent / \"../static\" / \"widget.css\"\n value = traitlets.Int(0).tag(sync=True)\n component = traitlets.Unicode(\"Matrix\").tag(sync=True)\n\n network = traitlets.Dict({}).tag(sync=True)\n click_info = traitlets.Dict({}).tag(sync=True)\n"},{"location":"technologies/","title":"Technologies Overview","text":""},{"location":"technologies/parquet/","title":"Parquet","text":""}]} \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz index bc2702e..b47d35f 100644 Binary files a/sitemap.xml.gz and b/sitemap.xml.gz differ diff --git a/technologies/index.html b/technologies/index.html index ccfeb9f..5f57029 100644 --- a/technologies/index.html +++ b/technologies/index.html @@ -216,6 +216,10 @@ + + + + @@ -233,16 +237,24 @@ - - + + + + Overview - - + + + + + + + + @@ -252,6 +264,8 @@ + + diff --git a/technologies/parquet/index.html b/technologies/parquet/index.html index 2eee5a3..e058916 100644 --- a/technologies/parquet/index.html +++ b/technologies/parquet/index.html @@ -216,6 +216,10 @@ + + + + @@ -233,16 +237,24 @@ - - + + + + Overview - - + + + + + + + + @@ -252,6 +264,8 @@ + +
Celldega is a spatial analysis and visualization library that is being developed by the Spatial Technology Platform at the Broad Institute of MIT and Harvard. This project enables researchers to easily visualize large ST datasets (e.g., datasets with >100M transcripts) alongside single-cell and spatial analysis notebook workflows (e.g., sverse tools and novel spatial analysis approaches).
The Celldega library is being developed to help researchers easily visualize and analyze high-dimensional spatial-omics data in the context of a notebook workflow. Initial development has been focused on spatial transcriptomics visualization.
Celldega can be used as a Python library in a Jupyter notebook environment or as a stand-alone JavaScript library for creating visualizations.
The pre module contains methods for pre-processing ST data to generate LandscapeFiles.
pre
The viz module contains functions and classes for data visualization.
viz
widget
Celldega is named after a bodega, a small shop with all the essentials that is part of the fabric of a neighborhood.
Landscape View Xenium
# %load_ext autoreload\n# %autoreload 2\n# %env ANYWIDGET_HMR=1\n
import celldega as dega\ndega.__version__\n
'0.0.0'
from observable_jupyter import embed\n
base_url = 'https://raw.githubusercontent.com/broadinstitute/celldega_Xenium_Prime_Mouse_Brain_Coronal_FF_outs/main/Xenium_Prime_Mouse_Brain_Coronal_FF_outs'\n
embed('@cornhundred/celldega-landscape-ist', inputs={'base_url': base_url}, cells=['landscape_container'], display_logo=False)\n
# base_url = 'https://raw.githubusercontent.com/broadinstitute/celldega_Xenium_Prime_Human_Skin_FFPE_outs/main/Xenium_Prime_Human_Skin_FFPE_outs'\n\n# landscape_ist = dega.viz.Landscape(\n# technology='Xenium',\n# ini_zoom = -4.5,\n# ini_x=6000,\n# ini_y=8000,\n# base_url = base_url,\n\n# )\n\n# landscape_ist\n
\n
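For reference, a minimal sketch of what the commented-out cell above does when enabled; the base_url, initial view coordinates, and zoom level below are simply the placeholder values from that cell, not required settings:

import celldega as dega

# LandscapeFiles hosted on GitHub for a human skin FFPE Xenium dataset
base_url = 'https://raw.githubusercontent.com/broadinstitute/celldega_Xenium_Prime_Human_Skin_FFPE_outs/main/Xenium_Prime_Human_Skin_FFPE_outs'

# Create the Landscape widget with an initial view and zoom level
landscape_ist = dega.viz.Landscape(
    technology='Xenium',
    ini_zoom=-4.5,
    ini_x=6000,
    ini_y=8000,
    base_url=base_url,
)

# Display the widget by making it the last expression of the notebook cell
landscape_ist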
This page includes links to visualizations that are made with the stand-alone Celldega JavaScript library.
Celldega's visualization methods can be used as a stand-alone JavaScript library outside the context of a Jupyter notebook. This can be used to create showcase visualizations with publicly hosted data.
The JavaScript component of Celldega is used within the Jupyter Widgets framework to provide interactive visualization in the context of a Jupyter notebook but can also be used as a standalone JavaScript library.
landscape_ist
The landscape_ist function initializes and renders an interactive spatial transcriptomics (IST) landscape visualization. This API is designed to work with Deck.gl and includes customizable visualization options, dynamic data updates, and UI interactions.
el
HTMLElement
ini_model
Object
token
string
ini_x
ini_y
ini_z
number
ini_zoom
base_url
dataset_name
trx_radius
0.25
width
number|string
100%
height
800
view_change_custom_callback
Function
The landscape_ist function returns an object (landscape) with several methods for interacting with the visualization.
landscape
update_matrix_gene
Updates the visualization to highlight data for a specific gene.
inst_gene
update_matrix_col
Updates the visualization to highlight data for a specific column (e.g., cluster).
inst_col
update_matrix_dendro_col
Updates the visualization based on a dendrogram selection of columns.
selected_cols
Array<string>
update_view_state
Updates the view state of the Deck.gl visualization.
new_view_state
close_up
boolean
trx_layer
update_layers
Updates all visualization layers.
finalize
Finalizes the Deck.gl instance and cleans up resources.
import { landscape_ist } from 'path/to/landscape_ist';\n\nconst rootElement = document.getElementById('visualization-container');\nconst model = { /* Model containing visualization data */ };\n\nconst visualization = await landscape_ist(\n rootElement,\n model,\n 'example-token',\n 100,\n 200,\n 0,\n -5,\n 'https://example.com/data',\n 'Example Dataset'\n);\n\n// Update the visualization with a specific gene.\nvisualization.update_matrix_gene('TP53');\n\n// Update the visualization with a specific column.\nvisualization.update_matrix_col('Cluster 1');\n\n// Finalize the visualization when done.\nvisualization.finalize();\n
matrix_viz
The matrix_viz function initializes and renders a matrix visualization. This API is built using approaches and code adaptations from the Clustergrammer-GL library, and it integrates tightly with Deck.gl to provide interactive and dynamic visualizations.
model
network
string|number
'800'
row_label_callback
col_label_callback
col_dendro_callback
The function performs the following setup:
1. Deck.gl Integration: Initializes a Deck.gl instance for the matrix visualization and sets properties for interactivity, including tooltips, view state changes, and layer filtering.
2. Configures labels, categories, and dendrograms for both rows and columns.
3. Layer Initialization: Attaches interactions (e.g., click events) to these layers.
4. UI Setup.
import { matrix_viz } from 'path/to/matrix_viz';\n\nconst rootElement = document.getElementById('matrix-container');\nconst model = { /* Model containing visualization data */ };\nconst network = { /* Network object representing the matrix data */ };\n\n// Callback functions\nconst rowLabelCallback = (row) => {\n console.log('Row label clicked:', row);\n};\n\nconst colLabelCallback = (col) => {\n console.log('Column label clicked:', col);\n};\n\nconst colDendroCallback = (dendro) => {\n console.log('Column dendrogram clicked:', dendro);\n};\n\n// Initialize the matrix visualization\nawait matrix_viz(\n model,\n rootElement,\n network,\n 800,\n 800,\n rowLabelCallback,\n colLabelCallback,\n colDendroCallback\n);\n
Celldega is a spatial analysis and visualization library that is being developed by the Spatial Technology Platform at the Broad Institute of MIT and Harvard. Celldega can be used as a Jupyter Widget in Python as well as a stand-alone JavaScript library.
Please see the example notebooks below to try out Celldega in a Jupyter notebook or an ObservableHQ JavaScript notebook:
The Celldega library can be installed using pip
# install Celldega (without vips for visualization pre-processing)\npip install celldega\n\n# install Celldega with optional pre-processing requirements\npip install celldega[pre]\n\n
Celldega can be used in a JavaScript environment such as ObservableHQ by importing it as a module
celldega = import('https://unpkg.com/celldega@' + version + '/src/celldega/static/widget.js?module')\n
** Coming soon **
Module for pre-processing to generate LandscapeFiles from ST data.
Module for visualization
convert_long_id_to_short(df)
Converts a column of long integer cell IDs in a DataFrame to a shorter, hash-based representation.
Parameters:
df
DataFrame
The DataFrame containing the EntityID.
Returns: pd.DataFrame: The original DataFrame with an additional column named cell_id containing the shortened cell IDs.
cell_id
The function applies a SHA-256 hash to each cell ID, encodes the hash using base64, and truncates it to create a shorter identifier that is added as a new column to the DataFrame.
src/celldega/pre/__init__.py
def convert_long_id_to_short(df):\n \"\"\"\n Converts a column of long integer cell IDs in a DataFrame to a shorter, hash-based representation.\n\n Args:\n df (pd.DataFrame): The DataFrame containing the EntityID.\n Returns:\n pd.DataFrame: The original DataFrame with an additional column named `cell_id`\n containing the shortened cell IDs.\n\n The function applies a SHA-256 hash to each cell ID, encodes the hash using base64, and truncates\n it to create a shorter identifier that is added as a new column to the DataFrame.\n \"\"\"\n # Function to hash and encode the cell ID\n def hash_and_shorten_id(cell_id):\n # Create a hash of the cell ID\n cell_id_bytes = str(cell_id).encode('utf-8')\n hash_object = hashlib.sha256(cell_id_bytes)\n hash_digest = hash_object.digest()\n\n # Encode the hash to a base64 string to mix letters and numbers, truncate to 9 characters\n short_id = base64.urlsafe_b64encode(hash_digest).decode('utf-8')[:9]\n return short_id\n\n # Apply the hash_and_shorten_id function to each cell ID in the specified column\n df['cell_id'] = df['EntityID'].apply(hash_and_shorten_id)\n\n return df\n
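A small, hypothetical usage example of the hashing scheme described above; the EntityID values are made up, and the resulting cell_id values will be 9-character base64 strings:

import pandas as pd
from celldega.pre import convert_long_id_to_short

# Toy DataFrame with long integer cell IDs (hypothetical values)
df = pd.DataFrame({'EntityID': [987654321012345, 987654321012346]})

# Adds a 'cell_id' column with 9-character base64-encoded SHA-256 hashes
df = convert_long_id_to_short(df)
print(df)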
convert_to_jpeg(image_path, quality=80)
Convert a TIFF image to a JPEG image with a specified quality score
image_path : str Path to the image file quality : int (default=80) Quality score for the JPEG image
new_image_path : str Path to the JPEG image file
def convert_to_jpeg(image_path, quality=80):\n \"\"\"\n Convert a TIFF image to a JPEG image with a specified quality score\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=80)\n Quality score for the JPEG image\n\n Returns\n -------\n new_image_path : str\n Path to the JPEG image file\n\n \"\"\"\n\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a JPEG with the specified quality\n new_image_path = image_path.replace(\".tif\", \".jpeg\")\n image.jpegsave(new_image_path, Q=quality)\n\n return new_image_path\n
convert_to_png(image_path)
def convert_to_png(image_path):\n \"\"\"\n Convert a TIFF image to a PNG image\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n\n Returns\n -------\n new_image_path : str\n Path to the PNG image file\n\n \"\"\"\n\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a PNG\n new_image_path = image_path.replace(\".tif\", \".png\")\n image.pngsave(new_image_path)\n\n return new_image_path\n
convert_to_webp(image_path, quality=100)
Convert a TIFF image to a WEBP image with a specified quality score.
image_path : str Path to the image file quality : int (default=100) Quality score for the WEBP image (higher is better quality)
new_image_path : str Path to the WEBP image file
def convert_to_webp(image_path, quality=100):\n \"\"\"\n Convert a TIFF image to a WEBP image with a specified quality score.\n\n Parameters\n ----------\n image_path : str\n Path to the image file\n quality : int (default=100)\n Quality score for the WEBP image (higher is better quality)\n\n Returns\n -------\n new_image_path : str\n Path to the WEBP image file\n \"\"\"\n # Load the TIFF image\n image = pyvips.Image.new_from_file(image_path, access=\"sequential\")\n\n # Save the image as a WEBP with specified quality\n new_image_path = image_path.replace(\".tif\", \".webp\")\n image.webpsave(new_image_path, Q=quality)\n\n return new_image_path\n
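A hedged usage sketch of the three image conversion helpers; the input path is a placeholder, and these helpers require the optional pre-processing dependencies (pyvips) installed via pip install celldega[pre]:

from celldega.pre import convert_to_jpeg, convert_to_png, convert_to_webp

# Placeholder path to a TIFF image
path_tif = 'data/image.tif'

# Each helper writes the converted image next to the input TIFF and returns its path
path_jpeg = convert_to_jpeg(path_tif, quality=80)
path_png = convert_to_png(path_tif)
path_webp = convert_to_webp(path_tif, quality=100)

print(path_jpeg, path_png, path_webp)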
get_max_zoom_level(path_image_pyramid)
Returns the maximum zoom level based on the highest-numbered directory in the specified path_image_pyramid.
path_image_pyramid
str
The path to the directory containing zoom level directories.
Returns:
max_pyramid_zoom
int
The maximum zoom level.
def get_max_zoom_level(path_image_pyramid):\n \"\"\"\n Returns the maximum zoom level based on the highest-numbered directory\n in the specified path_image_pyramid.\n\n Parameters:\n path_image_pyramid (str): The path to the directory containing zoom level directories.\n\n Returns:\n max_pyramid_zoom (int): The maximum zoom level.\n \"\"\"\n # List all entries in the path_image_pyramid that are directories and can be converted to integers\n zoom_levels = [\n entry\n for entry in os.listdir(path_image_pyramid)\n if os.path.isdir(os.path.join(path_image_pyramid, entry)) and entry.isdigit()\n ]\n\n # Convert to integer and find the maximum value\n max_pyramid_zoom = max(map(int, zoom_levels)) if zoom_levels else None\n\n return max_pyramid_zoom\n
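For example, assuming a pyramid directory that contains numbered zoom-level subdirectories (0/, 1/, ..., 7/), a call might look like this (the path is a placeholder):

from celldega.pre import get_max_zoom_level

# Placeholder path to a directory containing numbered zoom-level subdirectories
path_image_pyramid = 'landscape_files/pyramid_images/dapi_files'

max_zoom = get_max_zoom_level(path_image_pyramid)
print(max_zoom)  # e.g. 7, or None if no numeric subdirectories are found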
make_cell_boundary_tiles(technology, path_cell_boundaries, path_meta_cell_micron, path_transformation_matrix, path_output, coarse_tile_factor=20, tile_size=250, tile_bounds=None, image_scale=1, max_workers=8)
Processes cell boundary data and divides it into spatial tiles based on the provided technology. Reads cell boundary data, applies affine transformations, and divides the data into coarse and fine tiles. The resulting tiles are saved as Parquet files, each containing the geometries of cells in that tile.
technology : str The technology used to generate the cell boundary data, e.g., \"MERSCOPE\", \"Xenium\", or \"custom\". path_cell_boundaries : str Path to the file containing the cell boundaries (Parquet format). path_meta_cell_micron : str Path to the file containing cell metadata (CSV format). path_transformation_matrix : str Path to the file containing the transformation matrix (CSV format). path_output : str Directory path where the output files (Parquet files) for each tile will be saved. coarse_tile_factor : int, optional, default=20 Scaling factor of each coarse-grain tile compared to the fine tile size. tile_size : int, optional, default=250 Size of each fine-grain tile in microns. tile_bounds : dict, optional Dictionary containing the minimum and maximum bounds for x and y coordinates. image_scale : float, optional, default=1 Scale factor to apply to the geometry data. max_workers : int, optional, default=8 Maximum number of parallel workers for processing tiles.
None
src/celldega/pre/boundary_tile.py
def make_cell_boundary_tiles(\n technology,\n path_cell_boundaries,\n path_meta_cell_micron,\n path_transformation_matrix,\n path_output,\n coarse_tile_factor=20,\n tile_size=250,\n tile_bounds=None,\n image_scale=1,\n max_workers=8\n):\n\n\n \"\"\"\n Processes cell boundary data and divides it into spatial tiles based on the provided technology.\n Reads cell boundary data, applies affine transformations, and divides the data into coarse and fine tiles.\n The resulting tiles are saved as Parquet files, each containing the geometries of cells in that tile.\n\n Parameters\n ----------\n technology : str\n The technology used to generate the cell boundary data, e.g., \"MERSCOPE\", \"Xenium\", or \"custom\".\n path_cell_boundaries : str\n Path to the file containing the cell boundaries (Parquet format).\n path_meta_cell_micron : str\n Path to the file containing cell metadata (CSV format).\n path_transformation_matrix : str\n Path to the file containing the transformation matrix (CSV format).\n path_output : str\n Directory path where the output files (Parquet files) for each tile will be saved.\n coarse_tile_factor : int, optional, default=20.\n scaling factor of each coarse-grain tile comparing to the fine tile size.\n tile_size : int, optional, default=500\n Size of each fine-grain tile in microns.\n tile_bounds : dict, optional\n Dictionary containing the minimum and maximum bounds for x and y coordinates.\n image_scale : float, optional, default=1\n Scale factor to apply to the geometry data.\n max_workers : int, optional, default=8\n Maximum number of parallel workers for processing tiles.\n\n Returns\n -------\n None\n \"\"\"\n\n def numpy_affine_transform(coords, matrix):\n \"\"\"Apply affine transformation to numpy coordinates.\"\"\"\n # Homogeneous coordinates for affine transformation\n coords = np.hstack([coords, np.ones((coords.shape[0], 1))])\n transformed_coords = coords @ matrix.T\n return transformed_coords[:, :2] # Drop the homogeneous coordinate\n\n def batch_transform_geometries(geometries, transformation_matrix, scale):\n \"\"\"\n Batch transform geometries using numpy for optimized performance.\n \"\"\"\n # Extract affine transformation parameters into a 3x3 matrix for numpy\n affine_matrix = np.array([\n [transformation_matrix[0, 0], transformation_matrix[0, 1], transformation_matrix[0, 2]],\n [transformation_matrix[1, 0], transformation_matrix[1, 1], transformation_matrix[1, 2]],\n [0, 0, 1]\n ])\n\n transformed_geometries = []\n\n for polygon in geometries:\n # Extract coordinates and transform them\n if isinstance(polygon, MultiPolygon):\n polygon = next(polygon.geoms) # Use the first geometry\n\n # Transform the exterior of the polygon\n exterior_coords = np.array(polygon.exterior.coords)\n\n # Apply the affine transformation and scale\n transformed_coords = numpy_affine_transform(exterior_coords, affine_matrix) / scale\n\n # Append the result to the transformed_geometries list\n transformed_geometries.append([transformed_coords.tolist()])\n\n return transformed_geometries\n\n\n def filter_and_save_fine_boundary(coarse_tile, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_output):\n cell_ids = coarse_tile.index.values\n\n tile_filter = (\n (coarse_tile[\"center_x\"] >= fine_tile_x_min) & (coarse_tile[\"center_x\"] < fine_tile_x_max) &\n (coarse_tile[\"center_y\"] >= fine_tile_y_min) & (coarse_tile[\"center_y\"] < fine_tile_y_max)\n )\n filtered_indices = np.where(tile_filter)[0]\n\n keep_cells = cell_ids[filtered_indices]\n 
        fine_tile_cells = coarse_tile.loc[keep_cells, ["GEOMETRY"]]
        fine_tile_cells = fine_tile_cells.assign(name=fine_tile_cells.index)

        if not fine_tile_cells.empty:
            filename = f"{path_output}/cell_tile_{fine_i}_{fine_j}.parquet"
            fine_tile_cells.to_parquet(filename)

    def process_fine_boundaries(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_output, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y):
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = []
            for fine_i in range(n_fine_tiles_x):
                fine_tile_x_min = x_min + fine_i * tile_size
                fine_tile_x_max = fine_tile_x_min + tile_size

                if not (fine_tile_x_min >= coarse_tile_x_min and fine_tile_x_max <= coarse_tile_x_max):
                    continue

                for fine_j in range(n_fine_tiles_y):
                    fine_tile_y_min = y_min + fine_j * tile_size
                    fine_tile_y_max = fine_tile_y_min + tile_size

                    if not (fine_tile_y_min >= coarse_tile_y_min and fine_tile_y_max <= coarse_tile_y_max):
                        continue

                    futures.append(executor.submit(
                        filter_and_save_fine_boundary, coarse_tile, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_output
                    ))

            for future in futures:
                future.result()

    tile_size_x = tile_size
    tile_size_y = tile_size

    transformation_matrix = pd.read_csv(path_transformation_matrix, header=None, sep=" ").values

    # Load cell boundary data based on the technology
    if technology == "MERSCOPE":
        df_meta = pd.read_parquet(f"{path_output.replace('cell_segmentation','cell_metadata.parquet')}")
        entity_to_cell_id_dict = pd.Series(df_meta.index.values, index=df_meta.EntityID).to_dict()
        cells_orig = gpd.read_parquet(path_cell_boundaries)
        cells_orig['cell_id'] = cells_orig['EntityID'].map(entity_to_cell_id_dict)
        cells_orig = cells_orig[cells_orig["ZIndex"] == 1]

        # Correct cell_id issues with meta_cell
        meta_cell = pd.read_csv(path_meta_cell_micron)
        meta_cell['cell_id'] = meta_cell['EntityID'].map(entity_to_cell_id_dict)
        cells_orig.index = meta_cell[meta_cell["cell_id"].isin(cells_orig['cell_id'])].index

        # Correct 'MultiPolygon' to 'Polygon'
        cells_orig["geometry"] = cells_orig["Geometry"].apply(
            lambda x: list(x.geoms)[0] if isinstance(x, MultiPolygon) else x
        )

        cells_orig.set_index('cell_id', inplace=True)

    elif technology == "Xenium":
        xenium_cells = pd.read_parquet(path_cell_boundaries)
        grouped = xenium_cells.groupby("cell_id")[["vertex_x", "vertex_y"]].agg(lambda x: x.tolist())
        grouped["geometry"] = grouped.apply(lambda row: Polygon(zip(row["vertex_x"], row["vertex_y"])), axis=1)
        cells_orig = gpd.GeoDataFrame(grouped, geometry="geometry")[["geometry"]]

    elif technology == "custom":
        cells_orig = gpd.read_parquet(path_cell_boundaries)

    # Transform geometries
    cells_orig["GEOMETRY"] = batch_transform_geometries(cells_orig["geometry"], transformation_matrix, image_scale)

    # Convert transformed geometries to polygons and calculate centroids
    cells_orig["polygon"] = cells_orig["GEOMETRY"].apply(lambda x: Polygon(x[0]))
    gdf_cells = gpd.GeoDataFrame(geometry=cells_orig["polygon"])
    gdf_cells["center_x"] = gdf_cells.geometry.centroid.x
    gdf_cells["center_y"] = gdf_cells.geometry.centroid.y
    gdf_cells["GEOMETRY"] = cells_orig["GEOMETRY"]

    # Ensure the output directory exists
    if not os.path.exists(path_output):
        os.makedirs(path_output)

    # Calculate tile bounds and fine/coarse tiles
    x_min, x_max = tile_bounds["x_min"], tile_bounds["x_max"]
    y_min, y_max = tile_bounds["y_min"], tile_bounds["y_max"]
    n_fine_tiles_x = int(np.ceil((x_max - x_min) / tile_size))
    n_fine_tiles_y = int(np.ceil((y_max - y_min) / tile_size))
    n_coarse_tiles_x = int(np.ceil((x_max - x_min) / (coarse_tile_factor * tile_size)))
    n_coarse_tiles_y = int(np.ceil((y_max - y_min) / (coarse_tile_factor * tile_size)))

    # Process coarse tiles in parallel
    for i in tqdm(range(n_coarse_tiles_x), desc="Processing coarse tiles"):
        coarse_tile_x_min = x_min + i * (coarse_tile_factor * tile_size)
        coarse_tile_x_max = coarse_tile_x_min + (coarse_tile_factor * tile_size)

        for j in range(n_coarse_tiles_y):
            coarse_tile_y_min = y_min + j * (coarse_tile_factor * tile_size)
            coarse_tile_y_max = coarse_tile_y_min + (coarse_tile_factor * tile_size)

            coarse_tile = gdf_cells[
                (gdf_cells["center_x"] >= coarse_tile_x_min) & (gdf_cells["center_x"] < coarse_tile_x_max) &
                (gdf_cells["center_y"] >= coarse_tile_y_min) & (gdf_cells["center_y"] < coarse_tile_y_max)
            ]
            if not coarse_tile.empty:
                process_fine_boundaries(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_output, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y)
make_deepzoom_pyramid(image_path, output_path, pyramid_name, tile_size=512, overlap=0, suffix='.jpeg')
Create a DeepZoom image pyramid from a JPEG image
image_path : str
    Path to the JPEG image file
tile_size : int (default=512)
    Tile size for the DeepZoom pyramid
overlap : int (default=0)
    Overlap size for the DeepZoom pyramid
suffix : str (default='.jpeg')
    Suffix for the DeepZoom pyramid tiles
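A minimal usage sketch; the paths and pyramid name below are placeholders, and the import assumes the function is exposed through celldega.pre:

>>> from celldega.pre import make_deepzoom_pyramid  # assumed import path
>>> make_deepzoom_pyramid(
...     image_path='data/dapi_image.jpeg',             # hypothetical JPEG input
...     output_path='landscape_files/pyramid_images',  # hypothetical output directory
...     pyramid_name='dapi_files',
...     tile_size=512,
...     overlap=0,
...     suffix='.jpeg'
... )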
def make_deepzoom_pyramid(
    image_path, output_path, pyramid_name, tile_size=512, overlap=0, suffix=".jpeg"
):
    """
    Create a DeepZoom image pyramid from a JPEG image

    Parameters
    ----------
    image_path : str
        Path to the JPEG image file
    tile_size : int (default=512)
        Tile size for the DeepZoom pyramid
    overlap : int (default=0)
        Overlap size for the DeepZoom pyramid
    suffix : str (default='jpeg')
        Suffix for the DeepZoom pyramid tiles

    Returns
    -------
    None

    """

    # Define the output path
    output_path = Path(output_path)

    # Load the JPEG image
    image = pyvips.Image.new_from_file(image_path, access="sequential")

    # check if the output path exists and create it if it does not
    output_path.mkdir(parents=True, exist_ok=True)

    # append the pyramid name to the output path
    output_path = output_path / pyramid_name

    # Save the image as a DeepZoom image pyramid
    image.dzsave(output_path, tile_size=tile_size, overlap=overlap, suffix=suffix)
make_meta_cell_image_coord(technology, path_transformation_matrix, path_meta_cell_micron, path_meta_cell_image, image_scale)
Apply an affine transformation to the cell coordinates in microns and save the transformed coordinates in pixels
technology : str
    The technology used to generate the data, Xenium and MERSCOPE are supported.
path_transformation_matrix : str
    Path to the transformation matrix file
path_meta_cell_micron : str
    Path to the meta cell file with coordinates in microns
path_meta_cell_image : str
    Path to save the meta cell file with coordinates in pixels
>>> make_meta_cell_image_coord(
...     technology='Xenium',
...     path_transformation_matrix='data/transformation_matrix.txt',
...     path_meta_cell_micron='data/meta_cell_micron.csv',
...     path_meta_cell_image='data/meta_cell_image.parquet'
... )
def make_meta_cell_image_coord(
    technology,
    path_transformation_matrix,
    path_meta_cell_micron,
    path_meta_cell_image,
    image_scale
):
    """
    Apply an affine transformation to the cell coordinates in microns and save
    the transformed coordinates in pixels

    Parameters
    ----------
    technology : str
        The technology used to generate the data, Xenium and MERSCOPE are supported.
    path_transformation_matrix : str
        Path to the transformation matrix file
    path_meta_cell_micron : str
        Path to the meta cell file with coordinates in microns
    path_meta_cell_image : str
        Path to save the meta cell file with coordinates in pixels

    Returns
    -------
    None

    Examples
    --------
    >>> make_meta_cell_image_coord(
    ...     technology='Xenium',
    ...     path_transformation_matrix='data/transformation_matrix.txt',
    ...     path_meta_cell_micron='data/meta_cell_micron.csv',
    ...     path_meta_cell_image='data/meta_cell_image.parquet'
    ... )

    """

    transformation_matrix = pd.read_csv(
        path_transformation_matrix, header=None, sep=" "
    ).values

    if technology == "MERSCOPE":
        meta_cell = pd.read_csv(path_meta_cell_micron, usecols=["EntityID", "center_x", "center_y"])
        meta_cell = convert_long_id_to_short(meta_cell)
        meta_cell["name"] = meta_cell["cell_id"]
        meta_cell = meta_cell.set_index('cell_id')
    elif technology == "Xenium":
        usecols = ["cell_id", "x_centroid", "y_centroid"]
        meta_cell = pd.read_csv(path_meta_cell_micron, index_col=0, usecols=usecols)
        meta_cell.columns = ["center_x", "center_y"]
        meta_cell["name"] = pd.Series(meta_cell.index, index=meta_cell.index)

    # Adding a ones column to accommodate for affine transformation
    meta_cell["ones"] = 1

    # Preparing the data for matrix multiplication
    points = meta_cell[["center_x", "center_y", "ones"]].values

    # Applying the transformation matrix
    transformed_points = np.dot(transformation_matrix, points.T).T

    # Updating the DataFrame with transformed coordinates
    meta_cell["center_x"] = transformed_points[:, 0]
    meta_cell["center_y"] = transformed_points[:, 1]

    # Dropping the ones column as it's no longer needed
    meta_cell.drop(columns=["ones"], inplace=True)

    meta_cell["center_x"] = meta_cell["center_x"] / image_scale
    meta_cell["center_y"] = meta_cell["center_y"] / image_scale

    meta_cell["geometry"] = meta_cell.apply(
        lambda row: [row["center_x"], row["center_y"]], axis=1
    )

    if technology == "MERSCOPE":
        meta_cell = meta_cell[["name", "geometry", "EntityID"]]
    else:
        meta_cell = meta_cell[["name", "geometry"]]

    meta_cell.to_parquet(path_meta_cell_image)
make_meta_gene(technology, path_cbg, path_output)
Create a DataFrame with genes and their assigned colors
technology : str
    The technology used to generate the data, Xenium and MERSCOPE are supported.
path_cbg : str
    Path to the cell-by-gene matrix data (the data format can vary based on technology)
path_output : str
    Path to save the meta gene file
>>> make_meta_gene(
...     technology='Xenium',
...     path_cbg='data/',
...     path_output='data/meta_gene.parquet'
... )
def make_meta_gene(technology, path_cbg, path_output):
    """
    Create a DataFrame with genes and their assigned colors

    Parameters
    ----------
    technology : str
        The technology used to generate the data, Xenium and MERSCOPE are supported.
    path_cbg : str
        Path to the cell-by-gene matrix data (the data format can vary based on technology)
    path_output : str
        Path to save the meta gene file

    Returns
    -------
    None

    Examples
    --------
    >>> make_meta_gene(
    ...     technology='Xenium',
    ...     path_cbg='data/',
    ...     path_output='data/meta_gene.parquet'
    ... )
    """

    if technology == "MERSCOPE":
        cbg = pd.read_csv(path_cbg, index_col=0)
        genes = cbg.columns.tolist()
    elif technology == "Xenium":
        # genes = pd.read_csv(path_cbg + 'features.tsv.gz', sep='\t', header=None)[1].values.tolist()
        cbg = read_cbg_mtx(path_cbg)
        genes = cbg.columns.tolist()

    # Get all categorical color palettes from Matplotlib and flatten them into a single list of colors
    palettes = [plt.get_cmap(name).colors for name in plt.colormaps() if "tab" in name]
    flat_colors = [color for palette in palettes for color in palette]

    # Convert RGB tuples to hex codes
    flat_colors_hex = [to_hex(color) for color in flat_colors]

    # Use modular arithmetic to assign a color to each gene, white for genes with "Blank"
    colors = [
        flat_colors_hex[i % len(flat_colors_hex)] if "Blank" not in gene else "#FFFFFF"
        for i, gene in enumerate(genes)
    ]

    # Create a DataFrame with genes and their assigned colors
    ser_color = pd.Series(colors, index=genes)

    # calculate gene expression metadata
    meta_gene = calc_meta_gene_data(cbg)
    meta_gene['color'] = ser_color

    # Identify sparse columns
    sparse_cols = [col for col in meta_gene.columns if pd.api.types.is_sparse(meta_gene[col])]

    # Convert sparse columns to dense
    for col in sparse_cols:
        meta_gene[col] = meta_gene[col].sparse.to_dense()

    meta_gene.to_parquet(path_output)
make_trx_tiles(technology, path_trx, path_transformation_matrix, path_trx_tiles, coarse_tile_factor=10, tile_size=250, chunk_size=1000000, verbose=False, image_scale=1, max_workers=8)
Processes transcript data by dividing it into coarse-grain and fine-grain tiles, applying transformations, and saving the results in a parallelized manner.
technology : str
    The technology used for generating the transcript data (e.g., "MERSCOPE" or "Xenium").
path_trx : str
    Path to the file containing the transcript data.
path_transformation_matrix : str
    Path to the file containing the transformation matrix (CSV file).
path_trx_tiles : str
    Directory path where the output files (Parquet files) for each tile will be saved.
coarse_tile_factor : int, optional
    Scaling factor of each coarse-grain tile relative to the fine tile size (default is 10).
tile_size : int, optional
    Size of each fine-grain tile in microns (default is 250).
chunk_size : int, optional
    Number of rows to process per chunk for memory efficiency (default is 1000000).
verbose : bool, optional
    Flag to enable verbose output (default is False).
image_scale : float, optional
    Scale factor to apply to the transcript coordinates (default is 1).
max_workers : int, optional
    Maximum number of parallel workers for processing tiles (default is 8).

dict
    A dictionary containing the bounds of the processed data in both x and y directions.
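A hedged usage sketch; the paths below are placeholders and the import assumes the function is exposed through celldega.pre. The returned bounds can be reused when tiling cell boundaries so both layers share the same tile grid:

>>> from celldega.pre import make_trx_tiles  # assumed import path
>>> tile_bounds = make_trx_tiles(
...     technology='Xenium',
...     path_trx='data/transcripts.parquet',                          # hypothetical input
...     path_transformation_matrix='data/transformation_matrix.txt',  # hypothetical input
...     path_trx_tiles='landscape_files/transcript_tiles',            # hypothetical output directory
...     tile_size=250,
...     image_scale=0.5
... )
>>> sorted(tile_bounds.keys())
['x_max', 'x_min', 'y_max', 'y_min']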
src/celldega/pre/trx_tile.py
def make_trx_tiles(
    technology,
    path_trx,
    path_transformation_matrix,
    path_trx_tiles,
    coarse_tile_factor=10,
    tile_size=250,
    chunk_size=1000000,
    verbose=False,
    image_scale=1,
    max_workers=8
):
    """
    Processes transcript data by dividing it into coarse-grain and fine-grain tiles,
    applying transformations, and saving the results in a parallelized manner.

    Parameters
    ----------
    technology : str
        The technology used for generating the transcript data (e.g., "MERSCOPE" or "Xenium").
    path_trx : str
        Path to the file containing the transcript data.
    path_transformation_matrix : str
        Path to the file containing the transformation matrix (CSV file).
    path_trx_tiles : str
        Directory path where the output files (Parquet files) for each tile will be saved.
    coarse_tile_factor : int, optional
        Scaling factor of each coarse-grain tile comparing to the fine tile size.
    tile_size : int, optional
        Size of each fine-grain tile in microns (default is 250).
    chunk_size : int, optional
        Number of rows to process per chunk for memory efficiency (default is 1000000).
    verbose : bool, optional
        Flag to enable verbose output (default is False).
    image_scale : float, optional
        Scale factor to apply to the transcript coordinates (default is 0.5).
    max_workers : int, optional
        Maximum number of parallel workers for processing tiles (default is 8).

    Returns
    -------
    dict
        A dictionary containing the bounds of the processed data in both x and y directions.
    """

    def process_coarse_tile(trx, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers):
        # Filter the entire dataset for the current coarse tile
        coarse_tile = trx.filter(
            (pl.col("transformed_x") >= coarse_tile_x_min) & (pl.col("transformed_x") < coarse_tile_x_max) &
            (pl.col("transformed_y") >= coarse_tile_y_min) & (pl.col("transformed_y") < coarse_tile_y_max)
        )

        if not coarse_tile.is_empty():
            # Now process fine tiles using global fine tile indices
            process_fine_tiles(coarse_tile, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers)


    def process_fine_tiles(coarse_tile, coarse_i, coarse_j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers=8):

        # Use ThreadPoolExecutor for parallel processing of fine-grain tiles within the coarse tile
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = []

            # Iterate over fine-grain tiles within the global bounds
            for fine_i in range(n_fine_tiles_x):
                fine_tile_x_min = x_min + fine_i * tile_size
                fine_tile_x_max = fine_tile_x_min + tile_size

                # Process only if the fine tile falls within the current coarse tile's bounds
                if not (fine_tile_x_min >= coarse_tile_x_min and fine_tile_x_max <= coarse_tile_x_max):
                    continue

                for fine_j in range(n_fine_tiles_y):
                    fine_tile_y_min = y_min + fine_j * tile_size
                    fine_tile_y_max = fine_tile_y_min + tile_size

                    # Process only if the fine tile falls within the current coarse tile's bounds
                    if not (fine_tile_y_min >= coarse_tile_y_min and fine_tile_y_max <= coarse_tile_y_max):
                        continue

                    # Submit the task for each fine tile to process in parallel
                    futures.append(executor.submit(
                        filter_and_save_fine_tile, coarse_tile, coarse_i, coarse_j, fine_i, fine_j,
                        fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_trx_tiles
                    ))

            # Wait for all futures to complete
            for future in concurrent.futures.as_completed(futures):
                future.result()  # Raise exceptions if any occurred during execution


    def filter_and_save_fine_tile(coarse_tile, coarse_i, coarse_j, fine_i, fine_j, fine_tile_x_min, fine_tile_x_max, fine_tile_y_min, fine_tile_y_max, path_trx_tiles):

        # Filter the coarse tile for the current fine tile's boundaries
        fine_tile_trx = coarse_tile.filter(
            (pl.col("transformed_x") >= fine_tile_x_min) & (pl.col("transformed_x") < fine_tile_x_max) &
            (pl.col("transformed_y") >= fine_tile_y_min) & (pl.col("transformed_y") < fine_tile_y_max)
        )

        if not fine_tile_trx.is_empty():
            # Add geometry column as a list of [x, y] pairs
            fine_tile_trx = fine_tile_trx.with_columns(
                pl.concat_list([pl.col("transformed_x"), pl.col("transformed_y")]).alias("geometry")
            ).drop(['transformed_x', 'transformed_y'])

            # Define the filename based on fine tile coordinates
            filename = f"{path_trx_tiles}/transcripts_tile_{fine_i}_{fine_j}.parquet"

            # Save the filtered DataFrame to a Parquet file
            fine_tile_trx.to_pandas().to_parquet(filename)


    # Load transformation matrix
    transformation_matrix = np.loadtxt(path_transformation_matrix)

    # Load the transcript data based on the technology using Polars
    if technology == "MERSCOPE":
        trx_ini = pl.read_csv(path_trx, columns=["gene", "global_x", "global_y"])
        trx_ini = trx_ini.with_columns([
            pl.col("global_x").alias("x"),
            pl.col("global_y").alias("y"),
            pl.col("gene").alias("name")
        ]).select(["name", "x", "y"])

    elif technology == "Xenium":
        trx_ini = pl.read_parquet(path_trx).select([
            pl.col("feature_name").alias("name"),
            pl.col("x_location").alias("x"),
            pl.col("y_location").alias("y")
        ])

    # Process the data in chunks and apply transformations
    all_chunks = []

    for start_row in tqdm(range(0, trx_ini.height, chunk_size), desc="Processing chunks"):
        chunk = trx_ini.slice(start_row, chunk_size)

        # Apply transformation matrix to the coordinates
        points = np.hstack([chunk.select(["x", "y"]).to_numpy(), np.ones((chunk.height, 1))])
        transformed_points = np.dot(points, transformation_matrix.T)[:, :2]

        # Create new transformed columns and drop original x, y columns
        transformed_chunk = chunk.with_columns([
            (pl.Series(transformed_points[:, 0]) * image_scale).round(2).alias("transformed_x"),
            (pl.Series(transformed_points[:, 1]) * image_scale).round(2).alias("transformed_y")
        ]).drop(["x", "y"])
        all_chunks.append(transformed_chunk)

    # Concatenate all chunks after processing
    trx = pl.concat(all_chunks)

    # Ensure the output directory exists
    if not os.path.exists(path_trx_tiles):
        os.makedirs(path_trx_tiles)

    # Get min and max x, y values
    x_min, x_max = trx.select([
        pl.col("transformed_x").min().alias("x_min"),
        pl.col("transformed_x").max().alias("x_max")
    ]).row(0)

    y_min, y_max = trx.select([
        pl.col("transformed_y").min().alias("y_min"),
        pl.col("transformed_y").max().alias("y_max")
    ]).row(0)

    # Calculate the number of fine-grain tiles globally
    n_fine_tiles_x = int(np.ceil((x_max - x_min) / tile_size))
    n_fine_tiles_y = int(np.ceil((y_max - y_min) / tile_size))

    # Calculate the number of coarse-grain tiles
    n_coarse_tiles_x = int(np.ceil((x_max - x_min) / (coarse_tile_factor * tile_size)))
    n_coarse_tiles_y = int(np.ceil((y_max - y_min) / (coarse_tile_factor * tile_size)))

    # Use ThreadPoolExecutor for parallel processing of coarse-grain tiles
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = []
        for i in range(n_coarse_tiles_x):
            coarse_tile_x_min = x_min + i * (coarse_tile_factor * tile_size)
            coarse_tile_x_max = coarse_tile_x_min + (coarse_tile_factor * tile_size)

            for j in range(n_coarse_tiles_y):
                coarse_tile_y_min = y_min + j * (coarse_tile_factor * tile_size)
                coarse_tile_y_max = coarse_tile_y_min + (coarse_tile_factor * tile_size)

                # Submit each coarse tile for parallel processing
                futures.append(executor.submit(
                    process_coarse_tile, trx, i, j, coarse_tile_x_min, coarse_tile_x_max, coarse_tile_y_min, coarse_tile_y_max, tile_size, path_trx_tiles, x_min, y_min, n_fine_tiles_x, n_fine_tiles_y, max_workers
                ))

        # Wait for all coarse tiles to complete
        for future in tqdm(concurrent.futures.as_completed(futures), desc="Processing coarse tiles", unit="tile"):
            future.result()  # Raise exceptions if any occurred during execution

    # Return the tile bounds
    tile_bounds = {
        "x_min": x_min,
        "x_max": x_max,
        "y_min": y_min,
        "y_max": y_max,
    }

    return tile_bounds
reduce_image_size(image_path, scale_image=0.5, path_landscape_files='')
Resize an image by a scale factor (using pyvips) and save the resized copy alongside the landscape files.

image_path : str
    Path to the image file
scale_image : float (default=0.5)
    Scale factor for the image resize
path_landscape_files : str (default='')
    Directory where the resized image is written

new_image_path : str
    Path to the resized image file
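A short usage sketch; the input path is a placeholder and the import assumes the function is exposed through celldega.pre. Given the naming scheme in the source below, the returned path is the input filename with a "_downsize" suffix inside the landscape files directory:

>>> from celldega.pre import reduce_image_size  # assumed import path
>>> new_path = reduce_image_size(
...     image_path='data/morphology_focus.tif',  # hypothetical full-resolution image
...     scale_image=0.5,
...     path_landscape_files='landscape_files'   # hypothetical output directory
... )
>>> new_path
'landscape_files/morphology_focus_downsize.tif'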
def reduce_image_size(image_path, scale_image=0.5, path_landscape_files=""):
    """

    Parameters
    ----------
    image_path : str
        Path to the image file
    scale_image : float (default=0.5)
        Scale factor for the image resize

    Returns
    -------
    new_image_path : str
        Path to the resized image file
    """

    image = pyvips.Image.new_from_file(image_path, access="sequential")

    resized_image = image.resize(scale_image)

    new_image_name = image_path.split("/")[-1].replace(".tif", "_downsize.tif")
    new_image_path = f"{path_landscape_files}/{new_image_name}"
    resized_image.write_to_file(new_image_path)

    return new_image_path
save_landscape_parameters(technology, path_landscape_files, image_name='dapi_files', tile_size=1000, image_info={}, image_format='.webp')
Save the landscape parameters to a JSON file.
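A short usage sketch; the directory is a placeholder and the import path is assumed. The call writes landscape_parameters.json into the landscape files directory:

>>> from celldega.pre import save_landscape_parameters  # assumed import path
>>> save_landscape_parameters(
...     technology='Xenium',
...     path_landscape_files='landscape_files',  # hypothetical output directory
...     image_name='dapi_files',
...     tile_size=1000,
...     image_info={},                           # structure of image_info is not documented here
...     image_format='.webp'
... )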
def save_landscape_parameters(
    technology, path_landscape_files, image_name="dapi_files", tile_size=1000, image_info={}, image_format='.webp'
):
    """
    Save the landscape parameters to a JSON file.
    """

    path_image_pyramid = f"{path_landscape_files}/pyramid_images/{image_name}"

    print(path_image_pyramid)

    max_pyramid_zoom = get_max_zoom_level(path_image_pyramid)

    landscape_parameters = {
        "technology": technology,
        "max_pyramid_zoom": max_pyramid_zoom,
        "tile_size": tile_size,
        "image_info": image_info,
        "image_format": image_format
    }

    path_landscape_parameters = f"{path_landscape_files}/landscape_parameters.json"

    with open(path_landscape_parameters, "w") as file:
        json.dump(landscape_parameters, file, indent=4)
Landscape

Bases: AnyWidget

A widget for interactive visualization of spatial omics data. This widget currently supports iST (Xenium and MERSCOPE) and sST (Visium HD) data.

Parameters:

ini_x (float): The initial x-coordinate of the view.
ini_y (float): The initial y-coordinate of the view.
ini_zoom (float): The initial zoom level of the view.
token (str): The token traitlet.
base_url (str): The base URL for the widget.
dataset_name (str, optional): The name of the dataset to visualize. This will show up in the user interface bar.

Attributes:

component (str): The name of the component.
technology (str): The technology used.
base_url (str): The base URL for the widget.
token (str): The token traitlet.
ini_x (float): The initial x-coordinate of the view.
ini_y (float): The initial y-coordinate of the view.
ini_z (float): The initial z-coordinate of the view.
ini_zoom (float): The initial zoom level of the view.
dataset_name (str): The name of the dataset to visualize.
update_trigger (dict): The dictionary to trigger updates.
cell_clusters (dict): The dictionary containing cell cluster information.

Returns:

Landscape: A widget for visualizing a 'landscape' view of spatial omics data.
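A hedged instantiation sketch for a notebook cell; the base_url is a placeholder pointing at a directory of pre-processed landscape files, the technology string is an assumed value, and the import assumes the class is exposed through celldega.viz:

>>> from celldega.viz import Landscape  # assumed import path
>>> landscape = Landscape(
...     technology='xenium',  # hypothetical value for the technology traitlet
...     base_url='https://example.com/landscape_files/my_dataset',  # placeholder URL
...     dataset_name='my_dataset',
...     ini_x=1000,
...     ini_y=1000,
...     ini_zoom=0
... )
>>> landscape  # display the widget in the notebook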
src/celldega/viz/widget.py
class Landscape(anywidget.AnyWidget):
    """
    A widget for interactive visualization of spatial omics data. This widget
    currently supports iST (Xenium and MERSCOPE) and sST (Visium HD data)

    Args:
        ini_x (float): The initial x-coordinate of the view.
        ini_y (float): The initial y-coordinate of the view.
        ini_zoom (float): The initial zoom level of the view.
        token (str): The token traitlet.
        base_url (str): The base URL for the widget.
        dataset_name (str, optional): The name of the dataset to visualize. This will show up in the user interface bar.

    Attributes:
        component (str): The name of the component.
        technology (str): The technology used.
        base_url (str): The base URL for the widget.
        token (str): The token traitlet.
        ini_x (float): The initial x-coordinate of the view.
        ini_y (float): The initial y-coordinate of the view.
        ini_z (float): The initial z-coordinate of the view.
        ini_zoom (float): The initial zoom level of the view.
        dataset_name (str): The name of the dataset to visualize.
        update_trigger (dict): The dictionary to trigger updates.
        cell_clusters (dict): The dictionary containing cell cluster information.

    Returns:
        Landscape: A widget for visualizing a 'landscape' view of spatial omics data.
    """
    _esm = pathlib.Path(__file__).parent / "../static" / "widget.js"
    _css = pathlib.Path(__file__).parent / "../static" / "widget.css"
    component = traitlets.Unicode("Landscape").tag(sync=True)

    technology = traitlets.Unicode("sst").tag(sync=True)
    base_url = traitlets.Unicode("").tag(sync=True)
    token = traitlets.Unicode("").tag(sync=True)
    ini_x = traitlets.Float(1000).tag(sync=True)
    ini_y = traitlets.Float(1000).tag(sync=True)
    ini_z = traitlets.Float(0).tag(sync=True)
    ini_zoom = traitlets.Float(0).tag(sync=True)
    square_tile_size = traitlets.Float(1.4).tag(sync=True)
    dataset_name = traitlets.Unicode("").tag(sync=True)
    region = traitlets.Dict({}).tag(sync=True)

    update_trigger = traitlets.Dict().tag(sync=True)
    cell_clusters = traitlets.Dict().tag(sync=True)

    width = traitlets.Int(0).tag(sync=True)
    height = traitlets.Int(800).tag(sync=True)

    def trigger_update(self, new_value):
        # This method updates the update_trigger traitlet with a new value
        # You can pass any information necessary for the update, or just a timestamp
        self.update_trigger = new_value

    def update_cell_clusters(self, new_clusters):
        # Convert the new_clusters to a JSON serializable format if necessary
        self.cell_clusters = new_clusters
Matrix

Bases: AnyWidget

A widget for interactive visualization of a hierarchically clustered matrix.

Parameters:

value (int): The value traitlet.
component (str): The component traitlet.
network (dict): The network traitlet.
click_info (dict): The click_info traitlet.

Attributes:

component (str): The name of the component.
network (dict): The network dictionary.
click_info (dict): The click_info dictionary.

Returns:

Matrix: A widget for visualizing a hierarchically clustered matrix.
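A hedged sketch for a notebook cell; the network dictionary is left as an empty placeholder because its expected structure is not documented in this section, and the import assumes the class is exposed through celldega.viz:

>>> from celldega.viz import Matrix  # assumed import path
>>> mat = Matrix(
...     network={}  # placeholder; supply the hierarchically clustered network dictionary here
... )
>>> mat  # display the widget in the notebook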
class Matrix(anywidget.AnyWidget):
    """
    A widget for interactive visualization of a hierarchically clustered matrix.

    Args:
        value (int): The value traitlet.
        component (str): The component traitlet.
        network (dict): The network traitlet.
        click_info (dict): The click_info traitlet.

    Attributes:
        component (str): The name of the component.
        network (dict): The network dictionary.
        click_info (dict): The click_info dictionary.

    Returns:
        Matrix: A widget for visualizing a hierarchically clustered matrix.
    """
    _esm = pathlib.Path(__file__).parent / "../static" / "widget.js"
    _css = pathlib.Path(__file__).parent / "../static" / "widget.css"
    value = traitlets.Int(0).tag(sync=True)
    component = traitlets.Unicode("Matrix").tag(sync=True)

    network = traitlets.Dict({}).tag(sync=True)
    click_info = traitlets.Dict({}).tag(sync=True)