diff --git a/config/config.yaml b/config/config.yaml
index 3061040..3e8a1cb 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -99,3 +99,20 @@ yahs:
 #  o: "hifiasm_p_purged_yahs" # output prefix use of this needs evaluation
   optional_params:
     "-e": "" # you can specify the restriction enzyme(s) used by the Hi-C experiment
+
+
+# Customisable parameters for quast
+quast:
+  t: 4 # number of threads
+  mem_mb: 1000 # memory in MB
+  optional_params:
+    "--fragmented": ""
+    "--large": ""
+#    "-r": "resources/reference_genomes/yeast/Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa"
+#    "-g": "resources/reference_genomes/yeast/Saccharomyces_cerevisiae.R64-1-1.101.gff3"
+
+# Customisable parameters for busco
+busco:
+  t: 4 # number of threads
+  mem_mb: 1000 # memory in MB
+  lineage: "resources/saccharomycetes_odb10" # lineage to be used for busco analysis
diff --git a/config/config_test.yaml b/config/config_test.yaml
index c4f7b1c..402bfd5 100644
--- a/config/config_test.yaml
+++ b/config/config_test.yaml
@@ -79,7 +79,7 @@ fcsgx:
   path_to_gx_db: "resources/gx_test_db/test-only"
 
 # Set this to False if you want to skip purge_dups steps:
-include_purge_dups: True
+include_purge_dups: False
 
 # Customisable parameters for purge_dups
 purge_dups:
@@ -97,3 +97,19 @@ yahs:
 #  o: "hifiasm_p_purged_yahs" # output prefix #use of this needs evaluation
   optional_params:
     "-e": "GATC" # you can specify the restriction enzyme(s) used by the Hi-C experiment
+
+# Customisable parameters for quast
+quast:
+  t: 4 # number of threads
+  mem_mb: 1000 # memory in MB
+  optional_params:
+    "--fragmented": ""
+    "--large": ""
+#    "-r": "resources/reference_genomes/yeast/Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa"
+#    "-g": "resources/reference_genomes/yeast/Saccharomyces_cerevisiae.R64-1-1.101.gff3"
+
+# Customisable parameters for busco
+busco:
+  t: 4 # number of threads
+  mem_mb: 1000 # memory in MB
+  lineage: "resources/saccharomycetes_odb10" # lineage to be used for busco analysis
diff --git a/workflow/Snakefile b/workflow/Snakefile
index 2b37bf7..29f5f12 100644
--- a/workflow/Snakefile
+++ b/workflow/Snakefile
@@ -8,11 +8,12 @@ configfile: "config/config.yaml"
 
 # set {samples} global variable - used to read Hi-C files
 samples_in_pattern = "{sample}_1.fastq.gz"
-samples = glob_wildcards(config["hic_path"] + samples_in_pattern).sample
+sample, = glob_wildcards(config["hic_path"] + samples_in_pattern).sample
 
 # set {diploid_mode} global variable - used to run hifiasm_het rather than hifiasm
 diploid_mode=config["hifiasm"]["optional_params"]["--h1"] and config["hifiasm"]["optional_params"]["--h2"]
 
+
 # Include the common rule, where the inputs for rule_all are defined
 include: "rules/common.smk"
 # Include the hifi_prep rule
@@ -47,7 +48,10 @@ include: "rules/two_read_bam_combiner.smk"
 include: "rules/picard.smk"
 # Include the yahs rule
 include: "rules/yahs.smk"
-
+# Include the quast rule
+include: "rules/quast.smk"
+# Include the busco rule
+include: "rules/busco.smk"
 
 # Define the rule all, which is the default rule that Snakemake runs when no specific rule or target is specified
 # input files are defined in the common.smk file
diff --git a/workflow/envs/busco.yaml b/workflow/envs/busco.yaml
new file mode 100644
index 0000000..1bea8c4
--- /dev/null
+++ b/workflow/envs/busco.yaml
@@ -0,0 +1,6 @@
+channels:
+  - conda-forge
+  - bioconda
+
+dependencies:
+  - busco=5.7.0
\ No newline at end of file
diff --git a/workflow/envs/quast.yaml b/workflow/envs/quast.yaml
new file mode 100644
index 0000000..dda2a1f
--- /dev/null
+++ b/workflow/envs/quast.yaml
@@ -0,0 +1,6 @@
+channels:
+  - conda-forge
+  - bioconda
+
+dependencies:
+  - quast=5.2.0
\ No newline at end of file
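A note on the `sample, =` assignment in the Snakefile hunk above: the trailing comma is ordinary Python single-element unpacking, so the workflow now binds exactly one Hi-C sample name as a plain string and fails fast if the glob matches zero or several samples. A minimal sketch with hypothetical values:

    # Single-element unpacking as used in the Snakefile (illustrative values only).
    sample, = ["yeast_hic"]    # binds sample == "yeast_hic"
    # sample, = []             # ValueError: not enough values to unpack (expected 1, got 0)
    # sample, = ["a", "b"]     # ValueError: too many values to unpack (expected 1)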
diff --git a/workflow/rules/busco.smk b/workflow/rules/busco.smk
new file mode 100644
index 0000000..ee83669
--- /dev/null
+++ b/workflow/rules/busco.smk
@@ -0,0 +1,28 @@
+# rule busco for assembly QC
+
+rule busco:
+    input:
+        unpack(get_assemblies_QC)
+    output:
+        dir = directory("results/busco"),
+    params:
+        lineage=config["busco"]["lineage"],
+        labels = list(get_assemblies_QC().keys())
+    threads: config["busco"]["t"]
+    log:
+        "logs/busco.log"
+    resources:
+        mem_mb=config['busco']['mem_mb'], # access memory from config
+    conda:
+        "../envs/busco.yaml"
+    shell:
+        """
+        mkdir -p {output}
+        labels=({params.labels})
+        count=0
+        for v in {input} ; do
+            k=${{labels[$count]}}
+            busco -i $v -o {output}/$k -m genome -l {params.lineage} -f -c {threads} >> {log} 2>&1
+            count=$(( $count + 1 ))
+        done
+        """
\ No newline at end of file
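The shell loop in the busco rule above leans on two ordering guarantees: Snakemake renders a list-valued param such as `params.labels` as space-separated words, which makes `labels=({params.labels})` a valid bash array, and `get_assemblies_QC()` returns a regular dict whose insertion order is preserved, so the label list lines up with the files that `unpack()` supplies to `{input}`. A plain-Python sketch of the pairing, with hypothetical label/path values:

    # Illustrative only: how the bash loop pairs labels with assemblies.
    # get_assemblies_QC() (defined in common.smk) returns a dict; dict order
    # is insertion order, so list(d.keys()) matches the files behind {input}.
    assemblies = {
        "asm_primary": "results/assemblies/asm_primary.fa",
        "yahs_primary": "results/assemblies/yahs_primary.fa",
    }
    labels = list(assemblies.keys())                    # what params.labels holds
    for count, path in enumerate(assemblies.values()):  # for v in {input}
        label = labels[count]                           # k=${labels[$count]}
        print(f"busco -i {path} -o results/busco/{label} -m genome")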
diff --git a/workflow/rules/bwa_index.smk b/workflow/rules/bwa_index.smk
index d42b7ca..2e03418 100644
--- a/workflow/rules/bwa_index.smk
+++ b/workflow/rules/bwa_index.smk
@@ -3,7 +3,6 @@
 # For more info: https://github.com/ArimaGenomics/mapping_pipeline
 # colora doesn't contain the codes to handle technical and biological replicates of hic reads, refer to the original pipeline for that
 
-
 rule bwa_index:
     input:
         get_bwa_index_inputs
diff --git a/workflow/rules/bwa_mem.smk b/workflow/rules/bwa_mem.smk
index 87d45e0..ae02479 100644
--- a/workflow/rules/bwa_mem.smk
+++ b/workflow/rules/bwa_mem.smk
@@ -8,14 +8,14 @@
 rule bwa_mem:
     input:
         REF="results/bwa_index_{hap}/asm.fa",
-        forward_hic="results/fastp/{sample}_trim_1.fastq.gz",
-        reverse_hic="results/fastp/{sample}_trim_2.fastq.gz",
+        forward_hic="results/fastp/hic_trim_1.fastq.gz",
+        reverse_hic="results/fastp/hic_trim_2.fastq.gz",
     output:
-        bam1="results/arima_mapping_pipeline_{hap}/RAW_DIR/{sample}_1.bam",
-        bam2="results/arima_mapping_pipeline_{hap}/RAW_DIR/{sample}_2.bam",
+        bam1="results/arima_mapping_pipeline_{hap}/RAW_DIR/hic_vs_contigs_1.bam",
+        bam2="results/arima_mapping_pipeline_{hap}/RAW_DIR/hic_vs_contigs_2.bam",
     threads: config["arima"]["CPU"]
     log:
-        "logs/bwa_mem_{hap}_{sample}.log",
+        "logs/bwa_mem_{hap}.log",
     resources:
         mem_mb=config['arima']['mem_mb'], # access memory from config
     conda:
diff --git a/workflow/rules/common.smk b/workflow/rules/common.smk
index 05c1e3a..462de5d 100644
--- a/workflow/rules/common.smk
+++ b/workflow/rules/common.smk
@@ -4,14 +4,14 @@ import os
 
 wildcard_constraints:
-    hap = "primary|hap1|hap2"
+    hap = "primary|hap1|hap2",
 
 
 def get_all_inputs(wc=None):
     if diploid_mode:
-        hap= ["hap1", "hap2"]
+        hap = ["hap1", "hap2"]
     else:
-        hap = ["primary"]
-    if not samples:
+        hap = ["primary"]
+    if not sample:
         # Show the user a meaningful error message
         logger.error(f"No files matching {samples_in_pattern} were found in"
                      f" {config.get('hic_path')}. Please check your config file.")
@@ -24,6 +24,8 @@ def get_all_inputs(wc=None):
         "results/nanoplot/NanoPlot-report.html", # nanoplot report
         "results/kmc/out.hist", # output of kmc rule
         "results/genomescope", # output directory of genomescope
+        "results/quast", # output directory of quast
+        "results/busco", # output directory of busco
     ]
 
     # Get the OATK outputs
@@ -37,13 +39,15 @@ def get_all_inputs(wc=None):
 
     inputs.extend(oatk_outputs)
 
     inputs.extend(expand(
-        "results/yahs_{hap}/asm_yahs_{sample}_scaffolds_final.fa", sample=samples, hap=hap
+        "results/yahs_{hap}/asm_yahs_scaffolds_final.fa", hap=hap
     )) # final scaffolded assembly
 
     # If not in diploid mode, add asm.alternate.fa as an input
     if not diploid_mode and config["include_purge_dups"] == True:
         inputs.append("results/purge_dups_alt/asm.alternate_purged.fa")
 
 
+#    inputs.extend(expand("results/busco/{tool}_{hap}_busco", tool=tool, hap=hap))
+
     return inputs
@@ -82,3 +86,17 @@ def get_bwa_index_inputs(wildcards):
     return f"results/hifiasm/asm.{hap}.fa"
 
 
+def get_assemblies_QC(wildcards=None):
+    if diploid_mode:
+        haps = ["hap1", "hap2"]
+    else:
+        haps = ["primary"]
+    results = {}
+    for hap in haps:
+        results[f"asm_{hap}"] = f"results/assemblies/asm_{hap}.fa"
+        results[f"yahs_{hap}"] = f"results/assemblies/yahs_{hap}.fa"
+        if config["include_purge_dups"]:
+            results[f"purged_{hap}"] = f"results/assemblies/purged_{hap}.fa"
+        if config["include_fcsgx"]:
+            results[f"fcsgx_{hap}"] = f"results/assemblies/fcsgx_{hap}.fa"
+    return results
\ No newline at end of file
diff --git a/workflow/rules/fastp.smk b/workflow/rules/fastp.smk
index 05949f6..c4be9ee 100644
--- a/workflow/rules/fastp.smk
+++ b/workflow/rules/fastp.smk
@@ -5,28 +5,20 @@ import glob
 
 rule fastp:
     input:
-        forward_in=expand(
-            "{hic_path}{sample}_1.fastq.gz",
-            hic_path=config["hic_path"],
-            sample=glob_wildcards(config["hic_path"] + "{sample}_1.fastq.gz").sample,
-        ),
-        reverse_in=expand(
-            "{hic_path}{sample}_2.fastq.gz",
-            hic_path=config["hic_path"],
-            sample=glob_wildcards(config["hic_path"] + "{sample}_2.fastq.gz").sample,
-        ),
+        forward_in=f"{config['hic_path']}{sample}_1.fastq.gz",
+        reverse_in=f"{config['hic_path']}{sample}_2.fastq.gz",
     output:
-        forward_out="results/fastp/{sample}_trim_1.fastq.gz",
-        reverse_out="results/fastp/{sample}_trim_2.fastq.gz",
-        json="results/fastp/{sample}_report_fastp.HiC.json",
-        html="results/fastp/{sample}_report_fastp.HiC.html",
+        forward_out="results/fastp/hic_trim_1.fastq.gz",
+        reverse_out="results/fastp/hic_trim_2.fastq.gz",
+        json="results/fastp/hic_report_fastp.HiC.json",
+        html="results/fastp/hic_report_fastp.HiC.html",
     params:
         optional_params=" ".join(
             f"{k} {v}" for k, v in config["fastp"]["optional_params"].items() if v
         ),
     threads: config["fastp"]["t"]
     log:
-        "logs/{sample}_fastp.log",
+        "logs/fastp.log",
     resources:
         mem_mb=config['fastp']['mem_mb'], # access memory from config
     conda:
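To make the new QC inputs concrete: `get_assemblies_QC()` above returns a label-to-path dict over the symlinks collected in results/assemblies, and the quast/busco rules consume it twice — once through `unpack()` in `input:` (one named input per key) and once in `params` to recover the labels. A sketch of the return value, assuming non-diploid mode with both `include_purge_dups` and `include_fcsgx` enabled in the config:

    # Illustrative only: get_assemblies_QC() result for hap == "primary"
    # with include_purge_dups and include_fcsgx both enabled.
    assemblies_for_qc = {
        "asm_primary": "results/assemblies/asm_primary.fa",
        "yahs_primary": "results/assemblies/yahs_primary.fa",
        "purged_primary": "results/assemblies/purged_primary.fa",
        "fcsgx_primary": "results/assemblies/fcsgx_primary.fa",
    }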
bam2_filt="results/arima_mapping_pipeline_{hap}/FILT_DIR/{sample}_2.bam", + bam1_filt="results/arima_mapping_pipeline_{hap}/FILT_DIR/hic_vs_contigs_filt_1.bam", + bam2_filt="results/arima_mapping_pipeline_{hap}/FILT_DIR/hic_vs_contigs_filt_2.bam", log: - "logs/fiter_five_end_{hap}_{sample}.log", + "logs/fiter_five_end_{hap}.log", resources: mem_mb=config['arima']['mem_mb'], # access memory from config conda: diff --git a/workflow/rules/get_assemblies.smk b/workflow/rules/get_assemblies.smk new file mode 100644 index 0000000..58eb087 --- /dev/null +++ b/workflow/rules/get_assemblies.smk @@ -0,0 +1,15 @@ +include: "common.smk" + +rule get_assemblies: + output: + directory("results/assemblies") + log: + "logs/get_assemblies.log", + resources: + mem_mb=config['arima']['mem_mb'], # access memory from config + conda: + "../envs/basic.yaml" + shell: + """ + mkdir -p {output} + """ \ No newline at end of file diff --git a/workflow/rules/hifiasm.smk b/workflow/rules/hifiasm.smk index d838c06..1ce0147 100644 --- a/workflow/rules/hifiasm.smk +++ b/workflow/rules/hifiasm.smk @@ -4,12 +4,13 @@ rule hifiasm: input: - "results/reads/hifi/hifi.fastq.gz", + reads = "results/reads/hifi/hifi.fastq.gz", output: gfa="results/hifiasm/asm.primary.gfa", gfa_alt="results/hifiasm/asm.alternate.gfa", fasta="results/hifiasm/asm.primary.fa", fasta_alt="results/hifiasm/asm.alternate.fa", + link_primary = "results/assemblies/asm_primary.fa", threads: config["hifiasm"]["t"] log: "logs/hifiasm.log", @@ -23,22 +24,28 @@ rule hifiasm: ), shell: """ - hifiasm {input} -t {threads} -o results/hifiasm/asm --primary {params.optional_params} >> {log} 2>&1 + hifiasm {input.reads} -t {threads} -o results/hifiasm/asm --primary {params.optional_params} >> {log} 2>&1 mv results/hifiasm/asm.p_ctg.gfa {output.gfa} mv results/hifiasm/asm.a_ctg.gfa {output.gfa_alt} awk -f scripts/gfa_to_fasta.awk < {output.gfa} > {output.fasta} awk -f scripts/gfa_to_fasta.awk < {output.gfa_alt} > {output.fasta_alt} + + # all the assemblies produced by the workflow will be symlinked to results/assemblies + + ln -srn {output.fasta} {output.link_primary} """ rule hifiasm_het: input: - "results/reads/hifi/hifi.fastq.gz", + reads = "results/reads/hifi/hifi.fastq.gz", output: - gfa="results/hifiasm/asm.hap1.gfa", - gfa_alt="results/hifiasm/asm.hap2.gfa", - fasta="results/hifiasm/asm.hap1.fa", - fasta_alt="results/hifiasm/asm.hap2.fa", + gfa_hap1="results/hifiasm/asm.hap1.gfa", + gfa_hap2="results/hifiasm/asm.hap2.gfa", + fasta_hap1="results/hifiasm/asm.hap1.fa", + fasta_hap2="results/hifiasm/asm.hap2.fa", + link_hap1 = "results/assemblies/asm_hap1.fa", + link_hap2 = "results/assemblies/asm_hap2.fa" threads: config["hifiasm"]["t"] log: "logs/hifiasm.log", @@ -52,9 +59,14 @@ rule hifiasm_het: ), shell: """ - hifiasm {input} -t {threads} -o results/hifiasm/asm {params.optional_params} >> {log} 2>&1 - mv results/hifiasm/asm.hic.hap1.p_ctg.gfa {output.gfa} - mv results/hifiasm/asm.hic.hap2.p_ctg.gfa {output.gfa_alt} - awk -f scripts/gfa_to_fasta.awk < {output.gfa} > {output.fasta} - awk -f scripts/gfa_to_fasta.awk < {output.gfa_alt} > {output.fasta_alt} + hifiasm {input.reads} -t {threads} -o results/hifiasm/asm {params.optional_params} >> {log} 2>&1 + mv results/hifiasm/asm.hic.hap1.p_ctg.gfa {output.gfa_hap1} + mv results/hifiasm/asm.hic.hap2.p_ctg.gfa {output.gfa_hap2} + awk -f scripts/gfa_to_fasta.awk < {output.gfa_hap1} > {output.fasta_hap1} + awk -f scripts/gfa_to_fasta.awk < {output.gfa_hap2} > {output.fasta_hap2} + + # all the assemblies produced by the 
diff --git a/workflow/rules/hifiasm.smk b/workflow/rules/hifiasm.smk
index d838c06..1ce0147 100644
--- a/workflow/rules/hifiasm.smk
+++ b/workflow/rules/hifiasm.smk
@@ -4,12 +4,13 @@
 
 rule hifiasm:
     input:
-        "results/reads/hifi/hifi.fastq.gz",
+        reads = "results/reads/hifi/hifi.fastq.gz",
     output:
         gfa="results/hifiasm/asm.primary.gfa",
         gfa_alt="results/hifiasm/asm.alternate.gfa",
         fasta="results/hifiasm/asm.primary.fa",
         fasta_alt="results/hifiasm/asm.alternate.fa",
+        link_primary = "results/assemblies/asm_primary.fa",
     threads: config["hifiasm"]["t"]
     log:
         "logs/hifiasm.log",
@@ -23,22 +24,28 @@ rule hifiasm:
         ),
     shell:
        """
-        hifiasm {input} -t {threads} -o results/hifiasm/asm --primary {params.optional_params} >> {log} 2>&1
+        hifiasm {input.reads} -t {threads} -o results/hifiasm/asm --primary {params.optional_params} >> {log} 2>&1
         mv results/hifiasm/asm.p_ctg.gfa {output.gfa}
         mv results/hifiasm/asm.a_ctg.gfa {output.gfa_alt}
         awk -f scripts/gfa_to_fasta.awk < {output.gfa} > {output.fasta}
         awk -f scripts/gfa_to_fasta.awk < {output.gfa_alt} > {output.fasta_alt}
+
+        # all the assemblies produced by the workflow will be symlinked to results/assemblies
+
+        ln -srn {output.fasta} {output.link_primary}
         """
 
 
 rule hifiasm_het:
     input:
-        "results/reads/hifi/hifi.fastq.gz",
+        reads = "results/reads/hifi/hifi.fastq.gz",
     output:
-        gfa="results/hifiasm/asm.hap1.gfa",
-        gfa_alt="results/hifiasm/asm.hap2.gfa",
-        fasta="results/hifiasm/asm.hap1.fa",
-        fasta_alt="results/hifiasm/asm.hap2.fa",
+        gfa_hap1="results/hifiasm/asm.hap1.gfa",
+        gfa_hap2="results/hifiasm/asm.hap2.gfa",
+        fasta_hap1="results/hifiasm/asm.hap1.fa",
+        fasta_hap2="results/hifiasm/asm.hap2.fa",
+        link_hap1 = "results/assemblies/asm_hap1.fa",
+        link_hap2 = "results/assemblies/asm_hap2.fa"
     threads: config["hifiasm"]["t"]
     log:
         "logs/hifiasm.log",
@@ -52,9 +59,14 @@ rule hifiasm_het:
         ),
     shell:
         """
-        hifiasm {input} -t {threads} -o results/hifiasm/asm {params.optional_params} >> {log} 2>&1
-        mv results/hifiasm/asm.hic.hap1.p_ctg.gfa {output.gfa}
-        mv results/hifiasm/asm.hic.hap2.p_ctg.gfa {output.gfa_alt}
-        awk -f scripts/gfa_to_fasta.awk < {output.gfa} > {output.fasta}
-        awk -f scripts/gfa_to_fasta.awk < {output.gfa_alt} > {output.fasta_alt}
+        hifiasm {input.reads} -t {threads} -o results/hifiasm/asm {params.optional_params} >> {log} 2>&1
+        mv results/hifiasm/asm.hic.hap1.p_ctg.gfa {output.gfa_hap1}
+        mv results/hifiasm/asm.hic.hap2.p_ctg.gfa {output.gfa_hap2}
+        awk -f scripts/gfa_to_fasta.awk < {output.gfa_hap1} > {output.fasta_hap1}
+        awk -f scripts/gfa_to_fasta.awk < {output.gfa_hap2} > {output.fasta_hap2}
+
+        # all the assemblies produced by the workflow will be symlinked to results/assemblies
+
+        ln -srn {output.fasta_hap1} {output.link_hap1}
+        ln -srn {output.fasta_hap2} {output.link_hap2}
         """
diff --git a/workflow/rules/ncbi_fcsgx.smk b/workflow/rules/ncbi_fcsgx.smk
index 6d9e203..14b5478 100644
--- a/workflow/rules/ncbi_fcsgx.smk
+++ b/workflow/rules/ncbi_fcsgx.smk
@@ -3,13 +3,14 @@
 
 rule fcsgx:
     input:
-        fasta="results/hifiasm/asm.{hap}.fa",
+        fasta = "results/hifiasm/asm.{hap}.fa",
     params:
         ncbi_tax_id=config["fcsgx"]["ncbi_tax_id"],
         path_to_gx_db=config["fcsgx"]["path_to_gx_db"],
     output:
         clean_fasta="results/ncbi_fcsgx_{hap}/asm_clean.fa",
-        contaminants="results/ncbi_fcsgx_{hap}/contaminants.fa"
+        contaminants="results/ncbi_fcsgx_{hap}/contaminants.fa",
+        link="results/assemblies/fcsgx_{hap}.fa"
     threads: config["hifiasm"]["t"]
     log:
         "logs/fcsgx_{hap}.log",
@@ -21,4 +22,5 @@ rule fcsgx:
         """
         run_gx.py --fasta {input.fasta} --out-dir results/ncbi_fcsgx_{wildcards.hap}/out --gx-db {params.path_to_gx_db} --tax-id {params.ncbi_tax_id} --debug >> {log} 2>&1
         cat {input.fasta} | gx clean-genome --action-report results/ncbi_fcsgx_{wildcards.hap}/out/asm.{wildcards.hap}.{params.ncbi_tax_id}.fcs_gx_report.txt --output {output.clean_fasta} --contam-fasta-out {output.contaminants} >> {log} 2>&1
+        ln -srn {output.clean_fasta} {output.link}
         """
diff --git a/workflow/rules/oatk.smk b/workflow/rules/oatk.smk
index e039749..5be3626 100644
--- a/workflow/rules/oatk.smk
+++ b/workflow/rules/oatk.smk
@@ -1,5 +1,5 @@
 # This rule runs oatk to extract organelles reads from the hifi reads.
-
+# get_oatk_outputs is used to dynamically decide the outputs of oatk
 
 rule oatk:
     input:
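The assembly rules above register every final FASTA under results/assemblies with `ln -srn`: `-s` makes a symbolic link, `-r` computes the target relative to the link's own directory (so, for instance, results/assemblies/asm_primary.fa points at ../hifiasm/asm.primary.fa and the results tree stays relocatable), and `-n` treats a destination that is already a symlink to a directory as a plain file rather than descending into it. These are GNU coreutils flags; since Snakemake removes pre-existing outputs before re-running a rule, the links can be created without `-f`.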
diff --git a/workflow/rules/picard.smk b/workflow/rules/picard.smk
index abb54d0..2004b4b 100644
--- a/workflow/rules/picard.smk
+++ b/workflow/rules/picard.smk
@@ -2,18 +2,21 @@
 # This pipeline is currently built for processing a single sample with one read1 and read2 FASTQ file.
 # For more info: https://github.com/ArimaGenomics/mapping_pipeline
 # colora doesn't contain the codes to handle technical and biological replicates of hic reads, refer to the original pipeline for that
+# The final files will be in REP_DIR to be consistent with the original pipeline but they will be marked with "final" and not with "rep1"
 
 
 rule picard:
     input:
-        tmp_bam="results/arima_mapping_pipeline_{hap}/TMP_DIR/{sample}.bam",
+        tmp_bam="results/arima_mapping_pipeline_{hap}/TMP_DIR/hic_vs_contigs_filt_paired.bam",
     output:
-        bam_paired="results/arima_mapping_pipeline_{hap}/PAIR_DIR/{sample}.bam",
-        mark_dup="results/arima_mapping_pipeline_{hap}/REP_DIR/{sample}_rep1.bam",
-        metrics="results/arima_mapping_pipeline_{hap}/REP_DIR/metrics.{sample}_rep1.txt",
-        stats="results/arima_mapping_pipeline_{hap}/REP_DIR/{sample}_rep1.bam.stats",
+        bam_add_read_group="results/arima_mapping_pipeline_{hap}/PAIR_DIR/paired_add_read_group.bam",
+        mark_dup="results/arima_mapping_pipeline_{hap}/REP_DIR/paired_mark_dups_final.bam",
+        metrics="results/arima_mapping_pipeline_{hap}/REP_DIR/metrics.final.txt",
+        stats="results/arima_mapping_pipeline_{hap}/REP_DIR/final.bam.stats",
+    params:
+        hic_sample_name=sample
     log:
-        "logs/picard_{hap}_{sample}.log",
+        "logs/picard_{hap}.log",
     resources:
         mem_mb=config['arima']['mem_mb'], # access memory from config
     conda:
@@ -21,8 +24,8 @@ rule picard:
     shell:
         """
         PICARD=${{CONDA_PREFIX}}/share/picard-*/picard.jar
-        java -Xmx4G -Djava.io.tmpdir=temp/ -jar ${{PICARD}} AddOrReplaceReadGroups INPUT={input.tmp_bam} OUTPUT={output.bam_paired} ID={wildcards.sample} LB={wildcards.sample} SM={wildcards.sample} PL=ILLUMINA PU=none
-        java -Xmx30G -XX:-UseGCOverheadLimit -Djava.io.tmpdir=temp/ -jar ${{PICARD}} MarkDuplicates INPUT={output.bam_paired} OUTPUT={output.mark_dup} METRICS_FILE={output.metrics} TMP_DIR=results/arima_mapping_pipeline_{wildcards.hap}/TMP_DIR ASSUME_SORTED=TRUE VALIDATION_STRINGENCY=LENIENT REMOVE_DUPLICATES=TRUE
+        java -Xmx4G -Djava.io.tmpdir=temp/ -jar ${{PICARD}} AddOrReplaceReadGroups INPUT={input.tmp_bam} OUTPUT={output.bam_add_read_group} ID={params.hic_sample_name} LB={params.hic_sample_name} SM={params.hic_sample_name} PL=ILLUMINA PU=none
+        java -Xmx30G -XX:-UseGCOverheadLimit -Djava.io.tmpdir=temp/ -jar ${{PICARD}} MarkDuplicates INPUT={output.bam_add_read_group} OUTPUT={output.mark_dup} METRICS_FILE={output.metrics} TMP_DIR=results/arima_mapping_pipeline_{wildcards.hap}/TMP_DIR ASSUME_SORTED=TRUE VALIDATION_STRINGENCY=LENIENT REMOVE_DUPLICATES=TRUE
         samtools index {output.mark_dup}
         perl scripts/get_stats.pl {output.mark_dup} > {output.stats}
         """
diff --git a/workflow/rules/purge_dups.smk b/workflow/rules/purge_dups.smk
index a36134c..df6ce50 100644
--- a/workflow/rules/purge_dups.smk
+++ b/workflow/rules/purge_dups.smk
@@ -2,8 +2,8 @@
 
 rule purge_dups:
     input:
-        reads="results/reads/hifi/hifi.fastq.gz",
-        fasta=get_purge_dups_inputs(),
+        reads = "results/reads/hifi/hifi.fastq.gz",
+        fasta = get_purge_dups_inputs(),
     output:
         paf="results/purge_dups/hifi_vs_primary_contigs.paf.gz",
         calcuts_log="results/purge_dups/calcuts.log",
@@ -18,6 +18,7 @@ rule purge_dups:
         hap_fa="results/purge_dups/hap.fa",
         purged_fasta="results/purge_dups/asm.primary_purged.fa",
         hist_plot="results/purge_dups/hist.out.png",
+        link = "results/assemblies/purged_primary.fa",
     threads: config["minimap2"]["t"]
     log:
         "logs/purge_dups.log",
@@ -50,4 +51,6 @@ rule purge_dups:
         mv hap.fa {output.hap_fa}
         mv purged.fa {output.purged_fasta}
         mv hist.out.png {output.hist_plot}
+
+        ln -srn {output.purged_fasta} {output.link}
         """
diff --git a/workflow/rules/quast.smk b/workflow/rules/quast.smk
new file mode 100644
index 0000000..9d71b5d
--- /dev/null
+++ b/workflow/rules/quast.smk
@@ -0,0 +1,23 @@
+# rule quast for assembly QC
+
+rule quast:
+    input:
+        unpack(get_assemblies_QC)
+    output:
+        directory("results/quast"),
+    threads: config["quast"]["t"]
+    log:
+        "logs/quast.log",
+    resources:
+        mem_mb=config['quast']['mem_mb'], # access memory from config
+    conda:
+        "../envs/quast.yaml"
+    params:
+        optional_params=" ".join(
+            f"{k} {v}" for k, v in config["quast"]["optional_params"].items() if v
+        ),
+        labels = ",".join(get_assemblies_QC().keys())
+    shell:
+        """
+        quast {input} -l {params.labels} -o {output} -t {threads} {params.optional_params} >> {log} 2>&1
+        """
diff --git a/workflow/rules/two_read_bam_combiner.smk b/workflow/rules/two_read_bam_combiner.smk
index 518536c..99f85e4 100644
--- a/workflow/rules/two_read_bam_combiner.smk
+++ b/workflow/rules/two_read_bam_combiner.smk
@@ -7,16 +7,16 @@
 
 rule two_read_bam_combiner:
     input:
-        bam1_filt="results/arima_mapping_pipeline_{hap}/FILT_DIR/{sample}_1.bam",
-        bam2_filt="results/arima_mapping_pipeline_{hap}/FILT_DIR/{sample}_2.bam",
+        bam1_filt="results/arima_mapping_pipeline_{hap}/FILT_DIR/hic_vs_contigs_filt_1.bam",
+        bam2_filt="results/arima_mapping_pipeline_{hap}/FILT_DIR/hic_vs_contigs_filt_2.bam",
         REF="results/bwa_index_{hap}/asm.fa",
     output:
-        tmp_bam="results/arima_mapping_pipeline_{hap}/TMP_DIR/{sample}.bam",
+        tmp_bam="results/arima_mapping_pipeline_{hap}/TMP_DIR/hic_vs_contigs_filt_paired.bam",
     threads: config["arima"]["CPU"]
     params:
         MAPQ_FILTER=config["arima"]["MAPQ_FILTER"],
     log:
-        "logs/two_read_bam_combiner_{hap}_{sample}.log",
+        "logs/two_read_bam_combiner_{hap}.log",
     resources:
         mem_mb=config['arima']['mem_mb'], # access memory from config
     conda:
diff --git a/workflow/rules/yahs.smk b/workflow/rules/yahs.smk
index a9f9917..db057e6 100644
--- a/workflow/rules/yahs.smk
+++ b/workflow/rules/yahs.smk
@@ -5,11 +5,12 @@ import glob
 rule yahs:
     input:
         REF="results/bwa_index_{hap}/asm.fa",
-        bam="results/arima_mapping_pipeline_{hap}/REP_DIR/{sample}_rep1.bam",
+        bam="results/arima_mapping_pipeline_{hap}/REP_DIR/paired_mark_dups_final.bam",
     output:
-        scaffolds="results/yahs_{hap}/asm_yahs_{sample}_scaffolds_final.fa",
+        scaffolds="results/yahs_{hap}/asm_yahs_scaffolds_final.fa",
+        link="results/assemblies/yahs_{hap}.fa"
     log:
-        "logs/yahs_{hap}_{sample}.log",
+        "logs/yahs_{hap}.log",
     resources:
         mem_mb=config['arima']['mem_mb'], # access memory from config
     conda:
@@ -20,5 +21,6 @@ rule yahs:
         ),
     shell:
         """
-        yahs {input.REF} {input.bam} -o results/yahs_{wildcards.hap}/asm_yahs_{wildcards.sample} {params.optional_params} >> {log} 2>&1
+        yahs {input.REF} {input.bam} -o results/yahs_{wildcards.hap}/asm_yahs {params.optional_params} >> {log} 2>&1
+        ln -srn {output.scaffolds} {output.link}
         """
diff --git a/workflow/rules/yahs_completed.smk b/workflow/rules/yahs_completed.smk
new file mode 100644
index 0000000..0d61b69
--- /dev/null
+++ b/workflow/rules/yahs_completed.smk
@@ -0,0 +1,15 @@
+# add a checkpoint to run the QC after all the assemblies have been symlinked to results/assemblies
+
+checkpoint yahs_done:
+    input:
+        get_yahs_output()
+    output:
+        completion_marker = "results/yahs_completed.txt"
+    log:
+        "logs/yahs_completed.log",
+    conda:
+        "../envs/basic.yaml"
+    shell:
+        """
+        ls {input} > {output.completion_marker}
+        """
\ No newline at end of file