From 8169f0d9a0a5a9e42293a352af8d0d0c98e28994 Mon Sep 17 00:00:00 2001
From: Julia820
Date: Tue, 26 Mar 2024 13:36:58 +0100
Subject: [PATCH] improved consensus tutorial

---
 .../code/create-consensus-peaks-python.md   | 7925 +++++++++++++++++
 .../create-consensus-peaks-python.ipynb     |   28 +-
 .../tutorials/create-consensus-peaks.md     |   36 +-
 mkdocs.yml                                  |    4 +-
 4 files changed, 7976 insertions(+), 17 deletions(-)
 create mode 100644 docs/geniml/code/create-consensus-peaks-python.md

diff --git a/docs/geniml/code/create-consensus-peaks-python.md b/docs/geniml/code/create-consensus-peaks-python.md
new file mode 100644
index 0000000..54345c3
--- /dev/null
+++ b/docs/geniml/code/create-consensus-peaks-python.md
@@ -0,0 +1,7925 @@
[7,925 added lines omitted: an auto-generated HTML export of the "create-consensus-peaks-python" Jupyter notebook, consisting of markup, styles, and scripts with no hand-written content]
diff --git a/docs/geniml/notebooks/create-consensus-peaks-python.ipynb b/docs/geniml/notebooks/create-consensus-peaks-python.ipynb
index fc55a5d..0b7a405 100644
--- a/docs/geniml/notebooks/create-consensus-peaks-python.ipynb
+++ b/docs/geniml/notebooks/create-consensus-peaks-python.ipynb
@@ -8,7 +8,7 @@
     "# How to build a new universe?\n",
     "\n",
     "## Data preprocessing\n",
-    "This is a jupyter version of CLI tutorial that can be found [here](). You will use here python functions insted of CLI to build and assess diffrent universe. Fielse that you will use here can be downlodead from XXX. In there you will find a compressed folder:\n",
+    "This is a Jupyter version of the CLI tutorial that can be found [here](../tutorials/create-consensus-peaks.md). Here you will use Python functions instead of the CLI to build and assess different universes. The files used here can be downloaded from XXX. In there you will find a compressed folder:\n",
     "\n",
     "```\n",
     "consensus:\n",
@@ -23,7 +23,7 @@
     "\n",
     "In the raw folder there are the example BED files used in this tutorial and a file with the names of the files we will analyze.\n",
     "\n",
-    "It assummes that you alread have files of the genome coverage by the analzed colletion. The example of how to creat them can be found [here]()."
+    "It assumes that you already have files with the genome coverage of the analyzed collection. An example of how to create them can be found [here](../tutorials/create-consensus-peaks.md)."
    ]
   },
   {
@@ -332,7 +332,7 @@
    "id": "ece5e3df-647f-46ab-bd95-4add00ebdfd5",
    "metadata": {},
    "source": [
-    "Additionaly using `get_rbs_from_assessment_file` and `get_f_10_score_from_assessment_file` you can calculate the assessment metrics from prevously calaculated assessment file.`"
+    "The CLI version of this [tutorial](../tutorials/create-consensus-peaks.md) showed how to calculate an assessment file with all the metrics. This file can be further summarized into specific metrics assessing the fit of a universe to a whole collection."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "462ffa69-3867-42b3-84f9-0cbe394dbd20",
    "metadata": {},
    "outputs": [],
-   "source": []
+   "source": [
+    "from geniml.assess.assess import get_rbs_from_assessment_file, get_f_10_score_from_assessment_file\n",
+    "import pandas as pd\n",
+    "\n",
+    "assessment_file_path = \"test_assess_data.csv\"\n",
+    "df = pd.read_csv(assessment_file_path)\n",
+    "df.head()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4f9f3a13",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "rbs = get_rbs_from_assessment_file(assessment_file_path)\n",
+    "f_10 = get_f_10_score_from_assessment_file(assessment_file_path)\n",
+    "rbs_flex = get_rbs_from_assessment_file(assessment_file_path, flexible=True)\n",
+    "f\"Universe\\nF10: {f_10:.2f}\\nRBS: {rbs:.2f}\\nflexible RBS: {rbs_flex:.2f}\""
+   ]
+  }
  ],
  "metadata": {
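*Editorial aside:* the new notebook cells summarize the assessment file with geniml's helper functions. For readers who want to see what such a summary can involve, here is a minimal, hypothetical sketch that computes an F10 score directly from the three overlap columns of `test_assess_data.csv`. It assumes F10 denotes the F-beta score with beta = 10 (recall weighted 10x over precision) and uses the column names listed in the CLI tutorial below (*univers/file*, *file/universe*, *universe&file*); geniml's own `get_f_10_score_from_assessment_file` may aggregate the per-file numbers differently.

```python
import pandas as pd

# Per-file assessment metrics produced by `geniml assess-universe`.
df = pd.read_csv("test_assess_data.csv")

# Pool base-pair counts over all files in the collection
# (column names as in the CLI tutorial; assumed, not verified):
overlap = df["universe&file"].sum()        # covered by both universe and file
universe_only = df["univers/file"].sum()   # covered only by the universe
file_only = df["file/universe"].sum()      # covered only by the file

precision = overlap / (overlap + universe_only)
recall = overlap / (overlap + file_only)

# F-beta with beta = 10 strongly favors recall over precision.
beta_sq = 10 ** 2
f10 = (1 + beta_sq) * precision * recall / (beta_sq * precision + recall)
print(f"F10 (pooled over files): {f10:.2f}")
```

Pooling base pairs weights large files more heavily; averaging per-file F10 values would be the other natural aggregation.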
diff --git a/docs/geniml/tutorials/create-consensus-peaks.md b/docs/geniml/tutorials/create-consensus-peaks.md
index f4a802a..b45c9a9 100644
--- a/docs/geniml/tutorials/create-consensus-peaks.md
+++ b/docs/geniml/tutorials/create-consensus-peaks.md
@@ -1,16 +1,20 @@
 # How to build a new universe?
 
 ## Data preprocessing
-In this tutorial, you will use CLI to build different types of universes from example files, which can be downloaded from XXX. In there you will find a compressed folder:
+In this tutorial, you will use the CLI of the geniml package to build different types of universes from example files, which can be downloaded from XXX. In there you will find a compressed folder:
 
 ```
 consensus:
-    raw
+    test_1.bed
+    test_2.bed
+    test_3.bed
+    test_4.bed
     file_list.txt
     chrom.sizes
 ```
 
-In the raw folder there are example BED files used in this tutorial and file withe names of files we will analyze. Additionally there is a file with chromosome sizes, which you will use to preprocess the data.
+In the raw folder there are the example BED files used in this tutorial, and file_list.txt contains the names of the files we will analyze. Additionally, there is a file with chromosome sizes, which you will use to preprocess the data.
 
 To build any kind of universe you need bigWig files with the genome coverage of the analyzed collection, which can be made using [uniwig](https://github.com/databio/uniwig/). First we have to combine all the analyzed files into one BED file:
 
 ```
 cat raw/* > raw/combined_files.bed
 ```
 
-This combined file can next be used to prepare the genome coverage tracks, with smoothing of breakpoints set to 5:
+This combined file can next be used to prepare the genome coverage tracks, with the window size for smoothing of breakpoints set to 25:
 
 ```
 $UNIWIG_PATH/bin/uniwig -m 25 raw/combined_files.bed chrom.sizes coverage/all
 ```
 
+This will create three files: `coverage/all_start.bw`, `coverage/all_core.bw`, and `coverage/all_end.bw`, with coverage of the genome by region starts, whole regions, and region ends, respectively. These files can be loaded into a genome browser for visualization.
+
 ## Coverage cutoff universe
 First, you will create a coverage cutoff universe (CC). This is the simplest type of universe; it includes only genomic positions with coverage greater than or equal to a cutoff *x*. By default, this cutoff is calculated using a simple likelihood model of the probability of a position appearing in the collection. The universe can be built based on the genome coverage alone:
 
 ```
 geniml build-universe cc --coverage-folder coverage/ \
                          --output-file universe_cc.bed
 ```
 
 Depending on the task, the universe can be smoothed by setting the `--merge` flag with the distance below which peaks should be merged together and
-`--filter-size` with minimum size of peak that should be part of the universe. Instead of it using maximum likelihood cutoff one can also defined cutoff with `--cutoff` flag. If it is set to 1 the result is union universe, and when to number of files it wil produce intersection universe.
+`--filter-size` with the minimum size of a peak that should be part of the universe. Instead of using the maximum likelihood cutoff, one can also define the cutoff with the `--cutoff` flag. If it is set to 1, the result is a union universe; when set to the number of analyzed files, it will produce an intersection universe.
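Before moving on to flexible universes, a small, self-contained sketch may help make the cutoff idea concrete. This is illustrative only, not geniml's implementation: keep the positions whose coverage reaches the cutoff and merge them into intervals; a cutoff of 1 yields the union of the collection, and a cutoff equal to the number of analyzed files yields the intersection.

```python
import numpy as np

def coverage_cutoff_universe(coverage: np.ndarray, cutoff: int):
    """Return half-open (start, end) intervals where coverage >= cutoff.

    `coverage` is a per-base coverage track for one chromosome, i.e. the
    kind of signal stored in the bigWig files produced by uniwig.
    """
    # Pad with False so intervals touching the chromosome ends close properly.
    above = np.concatenate(([False], coverage >= cutoff, [False]))
    # Positions where the mask flips give interval boundaries.
    edges = np.flatnonzero(above[1:] != above[:-1])
    return [(int(s), int(e)) for s, e in zip(edges[::2], edges[1::2])]

# Toy coverage of a 12-base chromosome by 4 files:
cov = np.array([0, 1, 2, 4, 4, 3, 1, 0, 0, 2, 2, 0])
print(coverage_cutoff_universe(cov, 1))  # union: [(1, 7), (9, 11)]
print(coverage_cutoff_universe(cov, 4))  # intersection: [(3, 5)]
```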
 ## Coverage cutoff flexible universe
-A more complex version of coverage cutoff universe is coverage cutoff flexible universe (CCF). In contrast to its' fixed version it produces flexible universes. It uses two cutoffs calculated based on maximum likelihood cutoff, making a confidence interval around the optimal cutoff value. Despite the fact that the CFF universe is more complex it is build using the same input as the CC universe:
+A more complex version of the coverage cutoff universe is the coverage cutoff flexible universe (CCF). In contrast to its fixed version, it produces a flexible universe. It builds a confidence interval around the maximum likelihood cutoff, which results in two values: one cutoff for region boundaries and another for the region core. Despite the fact that the CCF universe is more complex, it is built using the same input as the CC universe:
 
 ```
 geniml build-universe ccf --coverage-folder coverage/ \
                           --output-file universe_ccf.bed
 ```
 
 ## Maximum likelihood universe
-In the previous examples both CC anf CCF universes used simple likelihood model to calculate the cutoff. However, we also developed more complex likelihood model that takes into account the positions of starts and ends of the regions in the collection. This LH model can build based on coverage files:
+In the previous examples both the CC and CCF universes used a simple likelihood model to calculate the cutoff. However, we also developed a more complex likelihood model that takes into account the positions of the starts and ends of the regions in the collection. This LH model can be built from the coverage files and the number of analyzed files:
 
 ```
 geniml lh build_model --model-file model.tar \
                       --coverage-folder coverage/ \
                       --file-no `wc -l file_list.txt`
 ```
 
- The resulting tar archiver contains LH model that can be used for building flexible universes called a maximum likelihood universe (ML):
+The resulting tar archive contains the LH model. This model can be used as a scoring function that assigns to each position the probability of it being a start, a core, or an end. It can be used both for universe assessment and for universe building. Combining the LH model with an optimization algorithm builds flexible universes called maximum likelihood (ML) universes:
 
 ```
 geniml build-universe ml --model-file model.tar \
                          --output-file universe_ml.bed \
                          --coverage-folder coverage/
 ```
 
 ## HMM
-The forth presented method of creating universes utilizes Hidden Markov Models. In this approach the parts of flexible regions are hidden states of the model, while genome coverage by the collections are emissions. The resulting universe is called Hidden Markov Model universe. It can be build only based on the genome coverage by the collection:
+The fourth presented method of creating universes utilizes Hidden Markov Models (HMM). In this approach the parts of flexible regions are the hidden states of the model, while the genome coverage by the collection provides the emissions. The resulting universe is called a Hidden Markov Model universe. It can be built based only on the genome coverage by the collection:
 
 ```
 geniml build-universe hmm --coverage-folder coverage/ \
                           --output-file universe_hmm.bed
 ```
 
 # How to assess a new universe?
 
-So far you used many different methods for creating new universes. But choosing, which universe represents data the best can be challenging. To help with this decision we created three different metrics for assessing universe fit to the region collections: a base-level overlap score, a region boundary score, and a likelihood score. The two first metrics can be calculated separately for each file in the collections and than summarized. To calculate them you need raw files as well as the analyzed universe. It is also necessary to choose at least one metric out of : `--overlap`, `--distance`, `--distance-universe-to-file`, `--distance-flexible`, `--distance-flexible-universe-to-file` to be calculated. Here we present an example, which calculates all possible metrics for HMM universe:
+So far you have used many different methods for creating new universes. But choosing which universe represents the data best can be challenging. To help with this decision we created three different metrics for assessing how well a universe fits a region collection: a base-level overlap score, a region boundary score, and a likelihood score. The first two metrics can be calculated separately for each file in the collection and then summarized. To calculate them you need the raw files as well as the analyzed universe. It is also necessary to choose at least one assessment metric to be calculated:
+
+* `--overlap` - to calculate the base-pair overlap between the universe and the regions in the file, the number of base pairs only in the universe, and the number of base pairs only in the file, which can be used to calculate the F10 score;
+* `--distance` - to calculate the median distance from the regions in the raw file to the universe;
+* `--distance-universe-to-file` - to calculate the median distance from the universe to the regions in the raw file;
+* `--distance-flexible` - to calculate the median distance from the regions in the raw file to the universe, taking into account universe flexibility;
+* `--distance-flexible-universe-to-file` - to calculate the median distance from the universe to the regions in the raw file, taking into account universe flexibility.
+
+Here we present an example that calculates all possible metrics for the HMM universe:
 
 ```
 geniml assess-universe --raw-data-folder raw/ \
@@ -89,5 +103,5 @@ So far you used many different methods for creating new universes. But choosing,
    --distance-flexible \
    --distance-flexible-universe-to-file
 ```
-The resulting file is called test_assess_data.csv, and contains seven columns with the raw calculated metrics for each file.
-More information about assessing fit of universe to a collection of files can be found in jupyter notebook version of this tutorial tha can be found [here]().
\ No newline at end of file
+The resulting file is called test_assess_data.csv and contains columns with the raw calculated metrics for each file: *file*, *univers/file*, *file/universe*, *universe&file*, *median_dist_file_to_universe*, *median_dist_file_to_universe_flex*, *median_dist_universe_to_file*, *median_dist_universe_to_file_flex*.
+More information about assessing the fit of a universe to a collection of files can be found in the Jupyter notebook version of this tutorial, available [here](../code/create-consensus-peaks-python.md).
\ No newline at end of file
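As intuition for the distance flags above, here is a rough, hypothetical sketch of a median file-to-universe distance on a single chromosome. geniml's actual computation — including how the flexible variants credit a universe's soft start and end intervals — may differ:

```python
import numpy as np

def median_dist_file_to_universe(file_regions, universe_regions):
    """Median distance from each query region to its nearest universe region.

    Both arguments are lists of (start, end) tuples on one chromosome;
    query regions that overlap a universe region get distance 0.
    """
    starts = np.array([s for s, _ in universe_regions])
    ends = np.array([e for _, e in universe_regions])
    dists = []
    for s, e in file_regions:
        gap_left = s - ends     # >0 where a universe region ends before the query starts
        gap_right = starts - e  # >0 where a universe region starts after the query ends
        # Overlapping pairs have both gaps negative, clamped to 0.
        dists.append(np.maximum(0, np.maximum(gap_left, gap_right)).min())
    return float(np.median(dists))

universe = [(100, 200), (500, 600)]
query = [(150, 180), (250, 300), (700, 800)]
print(median_dist_file_to_universe(query, universe))  # distances 0, 50, 100 -> 50.0
```

Swapping the roles of the two interval sets gives the universe-to-file direction reported by `--distance-universe-to-file`.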
diff --git a/mkdocs.yml b/mkdocs.yml
index c1660f6..a480264 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -98,8 +98,8 @@ nav:
       - Populate a vector store: geniml/tutorials/load-qdrant-with-cell-embeddings.md
       - Predict cell-types using KNN: geniml/tutorials/cell-type-annotation-with-knn.md
       - Evaluate embeddings: geniml/tutorials/evaluation.md
-      - Create consensus peaks: geniml/tutorials/create-consensus-peaks.md
-      - Assess universe fit: geniml/tutorials/assess-universe.md
+      - Create consensus peaks with CLI: geniml/tutorials/create-consensus-peaks.md
+      - Create consensus peaks with Python: geniml/code/create-consensus-peaks-python.md
       - Fine-tune embeddings: geniml/tutorials/fine-tune-region2vec-model.md
       - Randomize bed files: geniml/tutorials/bedshift.md
       - Create evaluation dataset with bedshift: geniml/tutorials/bedshift-evaluation-guide.md