diff --git a/.gitignore b/.gitignore index 3cecd390f..c04f56b69 100644 --- a/.gitignore +++ b/.gitignore @@ -92,6 +92,8 @@ nbgrader/docs/source/user_guide/release/ps1/problem2.html nbgrader/docs/source/user_guide/source/header.html nbgrader/docs/source/user_guide/source/ps1/problem1.html nbgrader/docs/source/user_guide/source/ps1/problem2.html +nbgrader/docs/source/user_guide/source/ps1_autotest/problem1.html +nbgrader/docs/source/user_guide/source/ps1_autotest/problem2.html # components stuff node_modules diff --git a/nbgrader/apps/generateassignmentapp.py b/nbgrader/apps/generateassignmentapp.py index 7acf78974..b46a7a96a 100644 --- a/nbgrader/apps/generateassignmentapp.py +++ b/nbgrader/apps/generateassignmentapp.py @@ -2,10 +2,11 @@ import sys -from traitlets import default +from traitlets import default, Bool +from textwrap import dedent from .baseapp import NbGrader, nbgrader_aliases, nbgrader_flags -from ..converters import BaseConverter, GenerateAssignment, NbGraderException +from ..converters import BaseConverter, GenerateAssignment, NbGraderException, GenerateSourceWithTests from traitlets.traitlets import MetaHasTraits from typing import List, Any from traitlets.config.loader import Config @@ -51,6 +52,12 @@ {'BaseConverter': {'force': True}}, "Overwrite an assignment/submission if it already exists." ), + 'source_with_tests': ( + {'GenerateAssignmentApp': {'source_with_tests': True}}, + "Generate intermediate notebooks that contain both the autogenerated test code and the solutions. " + "Results will be saved in the source_with_tests/ folder. " + "This is useful for instructors to debug problematic autogenerated test code." + ), }) @@ -62,6 +69,17 @@ class GenerateAssignmentApp(NbGrader): aliases = aliases flags = flags + source_with_tests = Bool( + False, + help=dedent( + """ + Generate intermediate notebooks that contain both the autogenerated test code and the solutions. + Results will be saved in the source_with_tests/ folder. + This is useful for instructors to debug issues in autogenerated test code. + """ + ) + ).tag(config=True) + examples = """ Produce the version of the assignment that is intended to be released to students. This performs several modifications to the original assignment: @@ -112,7 +130,7 @@ class GenerateAssignmentApp(NbGrader): @default("classes") def _classes_default(self) -> List[MetaHasTraits]: classes = super(GenerateAssignmentApp, self)._classes_default() - classes.extend([BaseConverter, GenerateAssignment]) + classes.extend([BaseConverter, GenerateAssignment, GenerateSourceWithTests]) return classes def _load_config(self, cfg: Config, **kwargs: Any) -> None: @@ -141,6 +159,14 @@ def start(self) -> None: elif len(self.extra_args) == 1: self.coursedir.assignment_id = self.extra_args[0] + + if self.source_with_tests: + converter = GenerateSourceWithTests(coursedir=self.coursedir, parent=self) + try: + converter.start() + except NbGraderException: + sys.exit(1) + converter = GenerateAssignment(coursedir=self.coursedir, parent=self) try: converter.start() diff --git a/nbgrader/apps/quickstartapp.py b/nbgrader/apps/quickstartapp.py index 20512b072..c477f3091 100644 --- a/nbgrader/apps/quickstartapp.py +++ b/nbgrader/apps/quickstartapp.py @@ -40,6 +40,15 @@ """ ) ), + 'autotest': ( + {'QuickStartApp': {'autotest': True}}, + dedent( + """ + Create notebook assignments that have examples of automatic test generation via + ### AUTOTEST and ### HASHED AUTOTEST statements. 
+ """ + ) + ), } class QuickStartApp(NbGrader): @@ -73,6 +82,8 @@ class QuickStartApp(NbGrader): force = Bool(False, help="Whether to overwrite existing files").tag(config=True) + autotest = Bool(False, help="Whether to use automatic test generation in example files").tag(config=True) + @default("classes") def _classes_default(self): classes = super(QuickStartApp, self)._classes_default() @@ -115,12 +126,20 @@ def start(self): if not os.path.isdir(course_path): os.mkdir(course_path) - # populating it with an example + # populate it with an example self.log.info("Copying example from the user guide...") example = os.path.abspath(os.path.join( os.path.dirname(__file__), '..', 'docs', 'source', 'user_guide', 'source')) - ignore_html = shutil.ignore_patterns("*.html") - shutil.copytree(example, os.path.join(course_path, "source"), ignore=ignore_html) + if self.autotest: + tests_file_path = os.path.abspath(os.path.join( + os.path.dirname(__file__), '..', 'docs', 'source', 'user_guide', 'autotests.yml')) + shutil.copyfile(tests_file_path, os.path.join(course_path, 'autotests.yml')) + ignored_files = shutil.ignore_patterns("*.html", "ps1") + shutil.copytree(example, os.path.join(course_path, "source"), ignore=ignored_files) + os.rename(os.path.join(course_path, "source", "ps1_autotest"), os.path.join(course_path, "source", "ps1")) + else: + ignored_files = shutil.ignore_patterns("*.html", "autotests.yml", "ps1_autotest") + shutil.copytree(example, os.path.join(course_path, "source"), ignore=ignored_files) # create the config file self.log.info("Generating example config file...") diff --git a/nbgrader/converters/__init__.py b/nbgrader/converters/__init__.py index f0aab0f6a..7c42e28e3 100644 --- a/nbgrader/converters/__init__.py +++ b/nbgrader/converters/__init__.py @@ -5,6 +5,7 @@ from .feedback import Feedback from .generate_feedback import GenerateFeedback from .generate_solution import GenerateSolution +from .generate_source_with_tests import GenerateSourceWithTests __all__ = [ "BaseConverter", @@ -14,5 +15,6 @@ "Autograde", "Feedback", "GenerateFeedback", - "GenerateSolution" + "GenerateSolution", + "GenerateSourceWithTests" ] diff --git a/nbgrader/converters/generate_assignment.py b/nbgrader/converters/generate_assignment.py index 6d7601ef3..231e59bd4 100644 --- a/nbgrader/converters/generate_assignment.py +++ b/nbgrader/converters/generate_assignment.py @@ -8,6 +8,7 @@ from .base import BaseConverter, NbGraderException from ..preprocessors import ( IncludeHeaderFooter, + InstantiateTests, ClearSolutions, LockCells, ComputeChecksums, @@ -57,6 +58,7 @@ def _output_directory(self) -> str: preprocessors = List([ IncludeHeaderFooter, + InstantiateTests, LockCells, ClearSolutions, ClearOutput, diff --git a/nbgrader/converters/generate_source_with_tests.py b/nbgrader/converters/generate_source_with_tests.py new file mode 100644 index 000000000..2cebdb87b --- /dev/null +++ b/nbgrader/converters/generate_source_with_tests.py @@ -0,0 +1,49 @@ +import os +import re + +from traitlets import List, default + +from .base import BaseConverter +from ..preprocessors import ( + InstantiateTests, + ClearOutput, + CheckCellMetadata +) +from traitlets.config.loader import Config +from typing import Any +from ..coursedir import CourseDirectory + + +class GenerateSourceWithTests(BaseConverter): + + @default("permissions") + def _permissions_default(self) -> int: + return 664 if self.coursedir.groupshared else 644 + + @property + def _input_directory(self) -> str: + return self.coursedir.source_directory + + 
@property + def _output_directory(self) -> str: + return self.coursedir.source_with_tests_directory + + preprocessors = List([ + InstantiateTests, + ClearOutput, + CheckCellMetadata + ]).tag(config=True) + + def _load_config(self, cfg: Config, **kwargs: Any) -> None: + super(GenerateSourceWithTests, self)._load_config(cfg, **kwargs) + + def __init__(self, coursedir: CourseDirectory = None, **kwargs: Any) -> None: + super(GenerateSourceWithTests, self).__init__(coursedir=coursedir, **kwargs) + + def start(self) -> None: + old_student_id = self.coursedir.student_id + self.coursedir.student_id = '.' + try: + super(GenerateSourceWithTests, self).start() + finally: + self.coursedir.student_id = old_student_id diff --git a/nbgrader/coursedir.py b/nbgrader/coursedir.py index bbbe6c54d..9242e56a7 100644 --- a/nbgrader/coursedir.py +++ b/nbgrader/coursedir.py @@ -142,6 +142,18 @@ def _validate_notebook_id(self, proposal: Bunch) -> str: ) ).tag(config=True) + source_with_tests_directory = Unicode( + 'source_with_tests', + help=dedent( + """ + The name of the directory that contains notebooks with both solutions + and instantiated test code (i.e., all AUTOTEST directives are removed + and replaced by actual test code). This corresponds to the + `nbgrader_step` variable in the `directory_structure` config option. + """ + ) + ).tag(config=True) + submitted_directory = Unicode( 'submitted', help=dedent( diff --git a/nbgrader/docs/source/user_guide/advanced.rst b/nbgrader/docs/source/user_guide/advanced.rst index 502c0d393..715cab537 100644 --- a/nbgrader/docs/source/user_guide/advanced.rst +++ b/nbgrader/docs/source/user_guide/advanced.rst @@ -194,3 +194,160 @@ containerization system. For details on using ``envkernel`` with singularity, see the `README `_ of ``envkernel``. + +.. _customizing-autotests: + +Automatic test code generation +--------------------------------------- + +.. versionadded:: 0.9.0 + +.. seealso:: + + :ref:`autograder-tests-cell-automatic-test-code` + General introduction to automatic test code generation. + + +nbgrader now supports generating test code automatically +using ``### AUTOTEST`` and ``### HASHED AUTOTEST`` statements. +In this section, you can find more detail on how this works and +how to customize the test generation process. +Suppose you ask students to create a ``foo`` function that adds 5 to +an integer. In the source copy of the notebook, you might write something like + +.. code:: python + + ### BEGIN SOLUTION + def foo(x): + return x + 5 + ### END SOLUTION + +In a test cell, you would normally then write test code manually to probe various aspects of the solution. +For example, you might check that the function increments 3 to 8 properly, and that the type +of the output is an integer. + +.. code:: python + + assert isinstance(foo(3), int), "incrementing an int by 5 should return an int" + assert foo(3) == 8, "3+5 should be 8" + +nbgrader now provides functionality to automate this process. Instead of writing tests explicitly, +you can instead specify *what you want to test*, and let nbgrader decide *how to test it* automatically. + +.. code:: python + + ### AUTOTEST foo(3) + +This directive indicates that you want to check ``foo(3)`` in the student's notebook, and make sure it +aligns with the value of ``foo(3)`` in the current source copy. You can write any valid expression (in the +language of your notebook) after the ``### AUTOTEST`` directive. For example, you could write + +.. 
code:: python + + ### AUTOTEST (foo(3) - 5 == 3) + +to generate test code for the expression ``foo(3)-5==3`` (i.e., a boolean value), and make sure that evaluating +the student's copy of this expression has a result that aligns with the source version (i.e., ``True``). You can write multiple +``### AUTOTEST`` directives in one cell. You can also separate multiple expressions on one line with semicolons: + +.. code:: python + + ### AUTOTEST foo(3); foo(4); foo(5) != 8 + +These directives will insert code into student notebooks where the solution is available in plaintext. If you want to +obfuscate the answers in the student copy, you should instead use a ``### HASHED AUTOTEST``, which will produce +a student notebook where the answers are hashed and not viewable by students. + +When you generate an assignment containing ``### AUTOTEST`` (or ``### HASHED AUTOTEST``) statements, nbgrader looks for a file +named ``autotests.yml`` that contains instructions on how to generate test code. It first looks +in the assignment directory itself (in case you want to specify special tests for just that assignment), and if it is +not found there, nbgrader searches in the course root directory. +The ``autotests.yml`` file is a `YAML `__ file that looks something like this: + +.. code:: yaml + + python3: + setup: "from hashlib import sha1" + hash: 'sha1({{snippet}}.encode("utf-8")+b"{{salt}}").hexdigest()' + dispatch: "type({{snippet}})" + normalize: "str({{snippet}})" + check: 'assert {{snippet}} == """{{value}}""", """{{message}}"""' + success: "print('Success!')" + + templates: + default: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not correct" + + - test: "{{snippet}}" + fail: "value of {{snippet}} is not correct" + + int: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not int. Please make sure it is int and not np.int64, etc. You can cast your value into an int using int()" + + - test: "{{snippet}}" + fail: "value of {{snippet}} is not correct" + +The outermost level in the YAML file (the example shows an entry for ``python3``) specifies which kernel the configuration applies to. ``autotests.yml`` can +have separate sections for multiple kernels / languages. The ``autotests.yml`` file uses `Jinja templates `__ to +specify snippets of code that will be executed/inserted into Jupyter notebooks in the process of generating the assignment. You should familiarize yourself +with the basics of Jinja templates before proceeding. For each kernel, there are a few configuration settings possible: + +- **dispatch:** When you write ``### AUTOTEST foo(3)``, nbgrader needs to know how to test ``foo(3)``. It does so by executing ``foo(3)``, then checking its *type*, + and then running tests corresponding to that type in the ``autotests.yml`` file. Specifically, when generating an assignment, nbgrader substitutes the ``{{snippet}}`` template + variable with the expression ``foo(3)``, and then evaluates the dispatch code based on that. In this case, nbgrader runs ``type(foo(3))``, which will + return ``int``, so nbgrader will know to test ``foo(3)`` using tests for integer variables. +- **templates:** Once nbgrader determines the type of the expression ``foo(3)``, it will look for that type in the list of templates for the kernel. In this case, + it will find the ``int`` type in the list (it will use the **default** if the type is not found). 
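For instance (a sketch; the exact
+  generated code depends on your templates and configuration), with the file above a directive like ``### AUTOTEST foo(3)`` expands into checks of roughly this form:
+
+  .. code:: python
+
+      assert str(type(foo(3))) == """<class 'int'>""", """type of foo(3) is not int. Please make sure it is int and not np.int64, etc. You can cast your value into an int using int()"""
+      assert str(foo(3)) == """8""", """value of foo(3) is not correct"""
+
+  With ``### HASHED AUTOTEST``, the same checks are generated, except that each normalized expression is first wrapped in the **hash** template described below, so the expected value appears only as a digest.
+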
+  Each type has an associated list of **test**/**fail** template pairs, which tell nbgrader what tests to run
+  and what messages to print in the event of a failure. Once again, ``{{snippet}}`` will be replaced by the ``foo(3)`` expression. In the ``autotests.yml`` above, the
+  ``int`` type has two tests: one that checks the type of the expression, and one that checks its value. In this case, the student notebook will have
+  two tests: one that checks the value of ``type(foo(3))``, and one that checks the value of ``foo(3)``.
+- **normalize:** For each test code expression (for example, the ``type(foo(3))`` mentioned previously), nbgrader will execute code using the corresponding
+  Jupyter kernel, which will respond with a result in the form of a *string*. So nbgrader now knows that if it runs ``type(foo(3))`` at this
+  point in the notebook and converts the output to a string (i.e., *normalizes* it), it should obtain ``"int"``. However, nbgrader does not know how to convert output to a string; that
+  depends on the kernel! So the normalize code template tells nbgrader how to convert an expression to a string. In the ``autotests.yml`` example above, the
+  normalize template says that nbgrader should compare ``str(type(foo(3)))`` to ``"int"``.
+- **check:** This is the code template that will be inserted into the student notebook to run each test. The template has three variables: ``{{snippet}}`` is the normalized
+  test code, ``{{value}}`` is the evaluated version of that test code based on the source notebook, and ``{{message}}`` is
+  the text that will be printed in the event of a test failure. In the example above, the check code template tells nbgrader to insert an ``assert`` statement to run the test.
+- **hash (optional):** This is the code template responsible for hashing (i.e., obfuscating) the answers in the student notebook. The template has two variables:
+  ``{{snippet}}`` represents the expression that will be hashed, and ``{{salt}}`` is used for nbgrader to insert a `salt `__
+  prior to hashing. The salt prevents students from identifying hashes of answers to common question types. For example, a true/false question has only two possible answers;
+  without a salt, students would be able to recognize the hashes of ``True`` and ``False`` in their notebooks. By adding a salt, nbgrader makes the hashed version of the answer
+  different for each question, so that answers cannot be identified from their hashes.
+- **setup (optional):** This is a code template that will be run at the beginning of all test cells containing ``### AUTOTEST`` or ``### HASHED AUTOTEST`` directives. It is often used to import
+  special packages that only the test code requires. In the example above, the setup code imports the ``sha1`` function from ``hashlib``, which is necessary
+  for hashed test generation.
+- **success (optional):** This is a code template that will be added to the end of all test cells containing ``### AUTOTEST`` or ``### HASHED AUTOTEST`` directives. In the
+  generated student version of the notebook, this code will run only if all the tests in the cell pass. In the example ``autotests.yml`` file above, the success code simply runs
+  ``print('Success!')`` to indicate that all tests in the cell passed.
+
+.. note::
+
+    For assignments with ``### AUTOTEST`` and ``### HASHED AUTOTEST`` directives, it is often handy
+    to have an editable copy of the assignment with solutions *and* test code inserted. 
You can + use ``nbgrader generate_assignment --source_with_tests`` to generate this version of an assignment, + which will appear in the ``source_with_tests/`` folder in the course repository. + +.. warning:: + + The default ``autotests.yml`` test templates file included with the repository has tests for many + common data types (``int``, ``dict``, ``list``, ``float``, etc). It also has a ``default`` test template + that it will try to apply to any types that do not have specified tests. If you want to automatically + generate your own tests for custom types, you will need to implement those test templates in ``autotests.yml``. That being said, custom + object types often have standard Python types as class attributes. Sometimes an easier option is to use nbgrader to test these + attributes automatically instead. For example, if ``obj`` is a complicated type with no specific test template available, + but ``obj`` has an ``int`` attribute ``x``, you could consider testing that attribute directly, e.g., ``### AUTOTEST obj.x``. + +.. warning:: + + The InstantiateTests preprocessor in nbgrader is responsible for generating test code from ``### AUTOTEST`` + directives and the ``autotests.yml`` file. It has some configuration parameters not yet mentioned here. + The most important of these is the ``InstantiateTests.sanitizers`` dictionary, which tells nbgrader how to + clean up the string output from each kind of Jupyter kernel before using it in the process of generating tests. We have + implemented sanitizers for popular kernels in nbgrader already, but you might need to add your own. + + diff --git a/nbgrader/docs/source/user_guide/autotests.yml b/nbgrader/docs/source/user_guide/autotests.yml new file mode 100644 index 000000000..d1ed36e11 --- /dev/null +++ b/nbgrader/docs/source/user_guide/autotests.yml @@ -0,0 +1,305 @@ +python3: + setup: "from hashlib import sha1" + hash: 'sha1({{snippet}}.encode("utf-8")+b"{{salt}}").hexdigest()' + dispatch: "type({{snippet}})" + normalize: "str({{snippet}})" + check: 'assert {{snippet}} == """{{value}}""", """{{message}}"""' + success: "print('Success!')" + + templates: + default: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not correct" + + - test: "{{snippet}}" + fail: "value of {{snippet}} is not correct" + + int: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not int. Please make sure it is int and not np.int64, etc. You can cast your value into an int using int()" + + - test: "{{snippet}}" + fail: "value of {{snippet}} is not correct" + + float: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not float. Please make sure it is float and not np.float64, etc. You can cast your value into a float using float()" + + - test: "round({{snippet}}, 2)" + fail: "value of {{snippet}} is not correct (rounded to 2 decimal places)" + + set: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not set. {{snippet}} should be a set" + + - test: "len({{snippet}})" + fail: "length of {{snippet}} is not correct" + + - test: "{{snippet}}" + fail: "value of {{snippet}} is not correct" + + list: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not list. 
{{snippet}} should be a list" + + - test: "len({{snippet}})" + fail: "length of {{snippet}} is not correct" + + - test: "sorted(map(str, {{snippet}}))" + fail: "values of {{snippet}} are not correct" + + - test: "{{snippet}}" + fail: "order of elements of {{snippet}} is not correct" + + tuple: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not tuple. {{snippet}} should be a tuple" + + - test: "len({{snippet}})" + fail: "length of {{snippet}} is not correct" + + - test: "sorted(map(str, {{snippet}}))" + fail: "values of {{snippet}} are not correct" + + - test: "{{snippet}}" + fail: "order of elements of {{snippet}} is not correct" + + str: + + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not str. {{snippet}} should be an str" + + - test: "len({{snippet}})" + fail: "length of {{snippet}} is not correct" + + - test: "{{snippet}}.lower()" + fail: "value of {{snippet}} is not correct" + + - test: "{{snippet}}" + fail: "correct string value of {{snippet}} but incorrect case of letters" + + dict: + + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not dict. {{snippet}} should be a dict" + + - test: "len(list({{snippet}}.keys()))" + fail: "number of keys of {{snippet}} is not correct" + + - test: "sorted(map(str, {{snippet}}.keys()))" + fail: "keys of {{snippet}} are not correct" + + - test: "sorted(map(str, {{snippet}}.values()))" + fail: "correct keys, but values of {{snippet}} are not correct" + + - test: "{{snippet}}" + fail: "correct keys and values, but incorrect correspondence in keys and values of {{snippet}}" + + bool: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not bool. {{snippet}} should be a bool" + + - test: "{{snippet}}" + fail: "boolean value of {{snippet}} is not correct" + + type: + - test: "{{snippet}}" + fail: "type of {{snippet}} is not correct" + + pandas.core.frame.DataFrame: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not pandas.core.frame.DataFrame. {{snippet}} should be a DataFrame" + + - test: "{{snippet}}.reindex(sorted({{snippet}}.columns), axis=1)" + fail: "some or all elements of {{snippet}} are not correct" + +# --------------------------------------------- + +python: + setup: "from hashlib import sha1" + hash: 'sha1({{snippet}}.encode("utf-8")+b"{{salt}}").hexdigest()' + dispatch: "type({{snippet}})" + normalize: "str({{snippet}})" + check: 'assert {{snippet}} == """{{value}}""", """{{message}}"""' + success: "print('Success!')" + + templates: + default: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not correct" + + - test: "{{snippet}}" + fail: "value of {{snippet}} is not correct" + + int: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not int. Please make sure it is int and not np.int64, etc. You can cast your value into an int using int()" + + - test: "{{snippet}}" + fail: "value of {{snippet}} is not correct" + + float: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not float. Please make sure it is float and not np.float64, etc. You can cast your value into a float using float()" + + - test: "round({{snippet}}, 2)" + fail: "value of {{snippet}} is not correct (rounded to 2 decimal places)" + + set: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not set. {{snippet}} should be a set" + + - test: "len({{snippet}})" + fail: "length of {{snippet}} is not correct" + + - test: "{{snippet}}" + fail: "value of {{snippet}} is not correct" + + list: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not list. 
{{snippet}} should be a list" + + - test: "len({{snippet}})" + fail: "length of {{snippet}} is not correct" + + - test: "sorted(map(str, {{snippet}}))" + fail: "values of {{snippet}} are not correct" + + - test: "{{snippet}}" + fail: "order of elements of {{snippet}} is not correct" + + tuple: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not tuple. {{snippet}} should be a tuple" + + - test: "len({{snippet}})" + fail: "length of {{snippet}} is not correct" + + - test: "sorted(map(str, {{snippet}}))" + fail: "values of {{snippet}} are not correct" + + - test: "{{snippet}}" + fail: "order of elements of {{snippet}} is not correct" + + str: + + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not str. {{snippet}} should be an str" + + - test: "len({{snippet}})" + fail: "length of {{snippet}} is not correct" + + - test: "{{snippet}}.lower()" + fail: "value of {{snippet}} is not correct" + + - test: "{{snippet}}" + fail: "correct string value of {{snippet}} but incorrect case of letters" + + dict: + + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not dict. {{snippet}} should be a dict" + + - test: "len(list({{snippet}}.keys()))" + fail: "number of keys of {{snippet}} is not correct" + + - test: "sorted(map(str, {{snippet}}.keys()))" + fail: "keys of {{snippet}} are not correct" + + - test: "sorted(map(str, {{snippet}}.values()))" + fail: "correct keys, but values of {{snippet}} are not correct" + + - test: "{{snippet}}" + fail: "correct keys and values, but incorrect correspondence in keys and values of {{snippet}}" + + bool: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not bool. {{snippet}} should be a bool" + + - test: "{{snippet}}" + fail: "boolean value of {{snippet}} is not correct" + + type: + - test: "{{snippet}}" + fail: "type of {{snippet}} is not correct" + + pandas.core.frame.DataFrame: + - test: "type({{snippet}})" + fail: "type of {{snippet}} is not pandas.core.frame.DataFrame. 
{{snippet}} should be a DataFrame"
+
+    - test: "{{snippet}}.reindex(sorted({{snippet}}.columns), axis=1)"
+      fail: "some or all elements of {{snippet}} are not correct"
+
+
+# --------------------------------------------------------------------------------------------------
+ir:
+  setup: 'library(digest)'
+  hash: 'digest(paste({{snippet}}, "{{salt}}"))'
+  dispatch: 'class({{snippet}})'
+  normalize: 'toString({{snippet}})'
+  check: 'stopifnot("{{message}}" = setequal({{snippet}}, "{{value}}"))'
+  success: "print('Success!')"
+
+  templates:
+    default:
+      - test: "class({{snippet}})"
+        fail: "type of {{snippet}} is not correct"
+
+      - test: "{{snippet}}"
+        fail: "value of {{snippet}} is not correct"
+
+    integer:
+      - test: "class({{snippet}})"
+        fail: "type of {{snippet}} is not integer"
+
+      - test: "length({{snippet}})"
+        fail: "length of {{snippet}} is not correct"
+
+      - test: "sort({{snippet}})"
+        fail: "values of {{snippet}} are not correct"
+
+    numeric:
+      - test: "class({{snippet}})"
+        fail: "type of {{snippet}} is not double"
+
+      - test: "round({{snippet}}, 2)"
+        fail: "value of {{snippet}} is not correct (rounded to 2 decimal places)"
+
+      - test: "length({{snippet}})"
+        fail: "length of {{snippet}} is not correct"
+
+      - test: "sort({{snippet}})"
+        fail: "values of {{snippet}} are not correct"
+
+    list:
+      - test: "class({{snippet}})"
+        fail: "type of {{snippet}} is not list"
+
+      - test: "length({{snippet}})"
+        fail: "length of {{snippet}} is not correct"
+
+      - test: "sort(c(names({{snippet}})))"
+        fail: "values of {{snippet}} names are not correct"
+
+      - test: "{{snippet}}"
+        fail: "order of elements of {{snippet}} is not correct"
+
+    character:
+      - test: "class({{snippet}})"
+        fail: "type of {{snippet}} is not character"
+
+      - test: "length({{snippet}})"
+        fail: "length of {{snippet}} is not correct"
+
+      - test: "tolower({{snippet}})"
+        fail: "value of {{snippet}} is not correct"
+
+      - test: "{{snippet}}"
+        fail: "correct string value of {{snippet}} but incorrect case of letters"
+
+    logical:
+      - test: "class({{snippet}})"
+        fail: "type of {{snippet}} is not logical"
+
+      - test: "{{snippet}}"
+        fail: "logical value of {{snippet}} is not correct"
diff --git a/nbgrader/docs/source/user_guide/creating_and_grading_assignments.ipynb b/nbgrader/docs/source/user_guide/creating_and_grading_assignments.ipynb
index 1699cc464..8e3256b67 100644
--- a/nbgrader/docs/source/user_guide/creating_and_grading_assignments.ipynb
+++ b/nbgrader/docs/source/user_guide/creating_and_grading_assignments.ipynb
@@ -426,6 +426,62 @@
     " (see :ref:`autograde-assignments`)."
    ]
   },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    ".. _autograder-tests-cell-automatic-test-code:"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### \"Autograder tests\" cells with automatically generated test code"
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    ".. versionadded:: 0.9.0"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Tests in \"Autograder tests\" cells can be automatically and dynamically generated through the use of the special syntax ``### AUTOTEST`` and ``### HASHED AUTOTEST``. This syntax allows you to specify only the objects you want to test, rather than having to write the test code manually yourself; `nbgrader` will generate the test code for you. 
For example,\n", + "![autograder tests autotest syntax](images/autograder_tests_autotest_jlab.png)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, the instructor wants to test that the returned value of `squares(1)`, `squares(2)`, and `squares(3)` lines up with the value from the source copy. In the release copy, the above autotest statements will be converted to the following test code that the students see:\n", + "![autograder tests autotest tests](images/autograder_tests_autogenerated_tests_jlab.png)" + ] + }, + { + "cell_type": "raw", + "metadata": {}, + "source": [ + "When creating the release version (see :ref:`assign-and-release-an-assignment`), the autotest lines (lines starting with the special syntax) will transform into automatically generated test cases (i.e., assert statements). The value of the expression(s) following the special syntax will be evaluated in the solution version to generate test cases that are checked in the student version. If this special syntax is not used, then the contents of the cell will remain as-is.\n", + "\n", + ".. note::\n", + "\n", + " Lines starting with ``### AUTOTEST`` will generate test code where the answer is visible to students. In the example above, the tests for ``squares(1)`` and ``squares(2)`` can be examined by students to see the answer. To generate test code that students can run, but where the answers are not viewable by students (they are *hashed*), begin the line with the syntax ``### HASHED AUTOTEST`` instead. You can also make ``### AUTOTEST`` and ``### HASHED AUTOTEST`` statements hidden and not runnable by students by wrapping them in ``### BEGIN HIDDEN TESTS`` and ``### END HIDDEN TESTS`` as in :ref:`autograder-tests-cell-hidden-tests`\n", + " \n", + ".. note:: \n", + "\n", + " You can put multiple expressions to be tested on a single ``### AUTOTEST`` line (or ``### HASHED AUTOTEST`` line), separated by semicolons.\n", + "\n", + ".. note::\n", + "\n", + " You can follow the ``### AUTOTEST`` or ``### HASHED AUTOTEST`` syntax with any valid Python expression. Test code will be automatically generated based on the return type of that expression. See :ref:`customizing-autotests` for more technical details about how ``### AUTOTEST`` and ``### HASHED AUTOTEST`` statements are converted into test code, and how to customize this process." 
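+    ,
+    "\n",
+    "\n",
+    ".. note::\n",
+    "\n",
+    "    As a rough sketch of the transformation (the exact output depends on your ``autotests.yml`` templates; this example assumes the default file shipped with the user guide), a line such as ``### AUTOTEST squares(1)`` expands into checks of the form::\n",
+    "\n",
+    "        assert str(type(squares(1))) == \"\"\"<class 'list'>\"\"\", \"\"\"type of squares(1) is not list. squares(1) should be a list\"\"\"\n",
+    "        assert str(len(squares(1))) == \"\"\"1\"\"\", \"\"\"length of squares(1) is not correct\"\"\"\n",
+    "        assert str(sorted(map(str, squares(1)))) == \"\"\"['1']\"\"\", \"\"\"values of squares(1) are not correct\"\"\"\n",
+    "        assert str(squares(1)) == \"\"\"[1]\"\"\", \"\"\"order of elements of squares(1) is not correct\"\"\"\n"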
+ ] + }, { "cell_type": "raw", "metadata": {}, diff --git a/nbgrader/docs/source/user_guide/grades.csv b/nbgrader/docs/source/user_guide/grades.csv index cc3ad44db..c80687689 100644 --- a/nbgrader/docs/source/user_guide/grades.csv +++ b/nbgrader/docs/source/user_guide/grades.csv @@ -1,3 +1,3 @@ assignment,duedate,timestamp,student_id,last_name,first_name,email,raw_score,late_submission_penalty,score,max_score -ps1,,2015-02-02 22:58:23.948203,bitdiddle,,,,1.5,0.0,1.5,13.0 -ps1,,2015-02-01 17:28:58.749302,hacker,,,,3.0,0.0,3.0,13.0 +ps1,,2015-02-02 22:58:23.948203,bitdiddle,,,,1.5,0.0,1.5,23.0 +ps1,,2015-02-01 17:28:58.749302,hacker,,,,3.0,0.0,3.0,23.0 diff --git a/nbgrader/docs/source/user_guide/images/autograder_tests_autogenerated_tests_jlab.png b/nbgrader/docs/source/user_guide/images/autograder_tests_autogenerated_tests_jlab.png new file mode 100644 index 000000000..11262fa14 Binary files /dev/null and b/nbgrader/docs/source/user_guide/images/autograder_tests_autogenerated_tests_jlab.png differ diff --git a/nbgrader/docs/source/user_guide/images/autograder_tests_autotest_jlab.png b/nbgrader/docs/source/user_guide/images/autograder_tests_autotest_jlab.png new file mode 100644 index 000000000..b90893cff Binary files /dev/null and b/nbgrader/docs/source/user_guide/images/autograder_tests_autotest_jlab.png differ diff --git a/nbgrader/docs/source/user_guide/managing_assignment_files.ipynb b/nbgrader/docs/source/user_guide/managing_assignment_files.ipynb index 4347958f6..8c75210be 100644 --- a/nbgrader/docs/source/user_guide/managing_assignment_files.ipynb +++ b/nbgrader/docs/source/user_guide/managing_assignment_files.ipynb @@ -465,6 +465,7 @@ "total ##\n", "-rw-r--r-- 1 nb_user nb_group [size] [date] [time] jupyter.png\n", "-rw-r--r-- 1 nb_user nb_group [size] [date] [time] problem1.ipynb\n", + "-rw-r--r-- 1 nb_user nb_group [size] [date] [time] problem1_autotest.ipynb\n", "-rw-r--r-- 1 nb_user nb_group [size] [date] [time] problem2.ipynb\n" ] } @@ -816,9 +817,11 @@ "[SubmitApp | WARNING] Possible missing notebooks and/or extra notebooks submitted for assignment ps1:\n", " Expected:\n", " \tproblem1.ipynb: MISSING\n", + " \tproblem1_autotest.ipynb: FOUND\n", " \tproblem2.ipynb: FOUND\n", " Submitted:\n", " \tmyproblem1.ipynb: EXTRA\n", + " \tproblem1_autotest.ipynb: OK\n", " \tproblem2.ipynb: OK\n", "[SubmitApp | INFO] Submitted as: example_course ps1 [timestamp] UTC\n" ] @@ -895,9 +898,11 @@ "[SubmitApp | CRITICAL] Assignment ps1 not submitted. 
There are missing notebooks for the submission:\n", " Expected:\n", " \tproblem1.ipynb: MISSING\n", + " \tproblem1_autotest.ipynb: FOUND\n", " \tproblem2.ipynb: FOUND\n", " Submitted:\n", " \tmyproblem1.ipynb: EXTRA\n", + " \tproblem1_autotest.ipynb: OK\n", " \tproblem2.ipynb: OK\n", "[SubmitApp | ERROR] nbgrader submit failed\n" ] diff --git a/nbgrader/docs/source/user_guide/source/ps1_autotest/jupyter.png b/nbgrader/docs/source/user_guide/source/ps1_autotest/jupyter.png new file mode 100644 index 000000000..201fc09ce Binary files /dev/null and b/nbgrader/docs/source/user_guide/source/ps1_autotest/jupyter.png differ diff --git a/nbgrader/tests/apps/files/test.ipynb b/nbgrader/docs/source/user_guide/source/ps1_autotest/problem1.ipynb similarity index 63% rename from nbgrader/tests/apps/files/test.ipynb rename to nbgrader/docs/source/user_guide/source/ps1_autotest/problem1.ipynb index c070f6e38..16d713402 100644 --- a/nbgrader/tests/apps/files/test.ipynb +++ b/nbgrader/docs/source/user_guide/source/ps1_autotest/problem1.ipynb @@ -31,13 +31,15 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, "nbgrader": { "grade": false, "grade_id": "squares", "locked": false, "schema_version": 3, "solution": true + }, + "vscode": { + "languageId": "python" } }, "outputs": [], @@ -65,7 +67,9 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "vscode": { + "languageId": "python" + } }, "outputs": [], "source": [ @@ -76,55 +80,55 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, "nbgrader": { "grade": true, "grade_id": "correct_squares", "locked": false, - "points": 1.0, + "points": 1, "schema_version": 3, "solution": false + }, + "vscode": { + "languageId": "python" } }, "outputs": [], "source": [ "\"\"\"Check that squares returns the correct output for several inputs\"\"\"\n", - "assert squares(1) == [1]\n", - "assert squares(2) == [1, 4]\n", - "assert squares(10) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]\n", - "assert squares(11) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]" + "### AUTOTEST squares(1); squares(2)\n", + "### HASHED AUTOTEST squares(3)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, "nbgrader": { "grade": true, "grade_id": "squares_invalid_input", "locked": false, - "points": 1.0, + "points": 1, "schema_version": 3, "solution": false + }, + "vscode": { + "languageId": "python" } }, "outputs": [], "source": [ "\"\"\"Check that squares raises an error for invalid inputs\"\"\"\n", - "try:\n", - " squares(0)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")\n", - "\n", - "try:\n", - " squares(-4)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")" + "def test_func_throws(func, ErrorType):\n", + " try:\n", + " func()\n", + " except ErrorType:\n", + " return True\n", + " else:\n", + " print('Did not raise right type of error!')\n", + " return False\n", + " \n", + "### AUTOTEST test_func_throws(lambda : squares(0), ValueError)\n", + "### AUTOTEST test_func_throws(lambda : squares(-4), ValueError);\n" ] }, { @@ -142,13 +146,15 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, "nbgrader": { "grade": false, "grade_id": "sum_of_squares", "locked": false, "schema_version": 3, "solution": true + }, + "vscode": { + "languageId": "python" } }, "outputs": [], @@ -171,7 +177,9 @@ "cell_type": "code", "execution_count": null, 
"metadata": { - "collapsed": false + "vscode": { + "languageId": "python" + } }, "outputs": [], "source": [ @@ -182,7 +190,6 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, "nbgrader": { "grade": true, "grade_id": "correct_sum_of_squares", @@ -190,22 +197,23 @@ "points": 0.5, "schema_version": 3, "solution": false + }, + "vscode": { + "languageId": "python" } }, "outputs": [], "source": [ "\"\"\"Check that sum_of_squares returns the correct answer for various inputs.\"\"\"\n", - "assert sum_of_squares(1) == 1\n", - "assert sum_of_squares(2) == 5\n", - "assert sum_of_squares(10) == 385\n", - "assert sum_of_squares(11) == 506" + "### AUTOTEST sum_of_squares(1)\n", + "### AUTOTEST sum_of_squares(2); sum_of_squares(10) \n", + "### AUTOTEST sum_of_squares(11) \n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, "nbgrader": { "grade": true, "grade_id": "sum_of_squares_uses_squares", @@ -213,21 +221,21 @@ "points": 0.5, "schema_version": 3, "solution": false + }, + "vscode": { + "languageId": "python" } }, "outputs": [], "source": [ "\"\"\"Check that sum_of_squares relies on squares.\"\"\"\n", + "\n", "orig_squares = squares\n", "del squares\n", - "try:\n", - " sum_of_squares(1)\n", - "except NameError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"sum_of_squares does not use squares\")\n", - "finally:\n", - " squares = orig_squares" + "\n", + "### AUTOTEST test_func_throws(lambda : sum_of_squares(1), NameError)\n", + "\n", + "squares = orig_squares\n" ] }, { @@ -247,7 +255,7 @@ "grade": true, "grade_id": "sum_of_squares_equation", "locked": false, - "points": 1.0, + "points": 1, "schema_version": 3, "solution": true } @@ -270,14 +278,16 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true, "nbgrader": { "grade": true, "grade_id": "sum_of_squares_application", "locked": false, - "points": 2.0, + "points": 2, "schema_version": 3, "solution": true + }, + "vscode": { + "languageId": "python" } }, "outputs": [], @@ -286,15 +296,104 @@ " \"\"\"Returns the n^th pyramidal number\"\"\"\n", " return sum_of_squares(n)" ] + }, + { + "cell_type": "markdown", + "metadata": { + "nbgrader": { + "grade": false, + "grade_id": "cell-938593c4a215c6cc", + "locked": true, + "points": 4, + "schema_version": 3, + "solution": false, + "task": true + } + }, + "source": [ + "---\n", + "## Part E (4 points)\n", + "\n", + "State the formulae for an arithmetic and geometric sum and verify them numerically for an example of your choice." 
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---\n",
+    "## Part F (1 point)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "nbgrader": {
+     "grade": false,
+     "grade_id": "cell-d3df8cd59fd0eb74",
+     "locked": false,
+     "schema_version": 3,
+     "solution": true,
+     "task": false
+    },
+    "vscode": {
+     "languageId": "python"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "my_dictionary = {\n",
+    "    'one' : 1,\n",
+    "    'two' : 2,\n",
+    "    'three' : 3\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "nbgrader": {
+     "grade": true,
+     "grade_id": "cell-6e9ff83aa5dfaf17",
+     "locked": true,
+     "points": 0,
+     "schema_version": 3,
+     "solution": false,
+     "task": false
+    },
+    "vscode": {
+     "languageId": "python"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "### AUTOTEST my_dictionary\n",
+    "### AUTOTEST my_dictionary[\"one\"]"
+   ]
+  }
 ],
 "metadata": {
+  "celltoolbar": "Create Assignment",
  "kernelspec": {
-   "display_name": "Python",
+   "display_name": "Python 3 (ipykernel)",
   "language": "python",
-   "name": "python"
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.12"
  }
 },
 "nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 1
}
diff --git a/nbgrader/docs/source/user_guide/source/ps1_autotest/problem2.ipynb b/nbgrader/docs/source/user_guide/source/ps1_autotest/problem2.ipynb
new file mode 100644
index 000000000..a8c653699
--- /dev/null
+++ b/nbgrader/docs/source/user_guide/source/ps1_autotest/problem2.ipynb
@@ -0,0 +1,79 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Consider the following piece of code:\n",
+    "\n",
+    "```python\n",
+    "def f(x):\n",
+    "    if x == 0 or x == 1:\n",
+    "        return x\n",
+    "    return f(x - 1) + f(x - 2)\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---\n",
+    "## Part A (1 point)\n",
+    "\n",
+    "Describe, in words, what this code does, and how it does it."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "nbgrader": {
+     "grade": true,
+     "grade_id": "part-a",
+     "locked": false,
+     "points": 1,
+     "schema_version": 3,
+     "solution": true
+    }
+   },
+   "source": [
+    "This function computes the Fibonacci sequence using recursion. The base cases are $x=0$ and $x=1$, in which case the function will return 0 or 1, respectively. In all other cases, the function will call itself to find the $x-1$ and $x-2$ Fibonacci numbers, and then add them together."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---\n",
+    "## Part B (2 points)\n",
+    "\n",
+    "For what inputs will this function not behave as expected? What will happen?"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "nbgrader": {
+     "grade": true,
+     "grade_id": "part-b",
+     "locked": false,
+     "points": 2,
+     "schema_version": 3,
+     "solution": true
+    }
+   },
+   "source": [
+    "The function will not work correctly for inputs less than zero. Such inputs will result in an infinite recursion, as the function will keep subtracting one but never reach a base case that stops it."
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python", + "language": "python", + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/nbgrader/preprocessors/__init__.py b/nbgrader/preprocessors/__init__.py index 1507ee0aa..874fc1e69 100644 --- a/nbgrader/preprocessors/__init__.py +++ b/nbgrader/preprocessors/__init__.py @@ -8,6 +8,7 @@ from .overwritecells import OverwriteCells from .checkcellmetadata import CheckCellMetadata from .execute import Execute +from .instantiatetests import InstantiateTests from .getgrades import GetGrades from .clearoutput import ClearOutput from .limitoutput import LimitOutput @@ -28,6 +29,7 @@ "OverwriteCells", "CheckCellMetadata", "Execute", + "InstantiateTests", "GetGrades", "ClearOutput", "LimitOutput", diff --git a/nbgrader/preprocessors/clearsolutions.py b/nbgrader/preprocessors/clearsolutions.py index 03ab3c13f..85a1fb7d2 100644 --- a/nbgrader/preprocessors/clearsolutions.py +++ b/nbgrader/preprocessors/clearsolutions.py @@ -15,6 +15,7 @@ class ClearSolutions(NbGraderPreprocessor): code_stub = Dict( dict(python="# YOUR CODE HERE\nraise NotImplementedError()", + R="# YOUR CODE HERE\nfail()", matlab="% YOUR CODE HERE\nerror('No Answer Given!')", octave="% YOUR CODE HERE\nerror('No Answer Given!')", sas="/* YOUR CODE HERE */\n %notImplemented;", diff --git a/nbgrader/preprocessors/instantiatetests.py b/nbgrader/preprocessors/instantiatetests.py new file mode 100644 index 000000000..65d04fcda --- /dev/null +++ b/nbgrader/preprocessors/instantiatetests.py @@ -0,0 +1,465 @@ +import os +import yaml +import jinja2 as j2 +import re +from .. import utils +from traitlets import Bool, List, Integer, Unicode, Dict, Callable +from textwrap import dedent +import asyncio +import inspect +import hashlib +import typing as t +from nbformat import NotebookNode +from queue import Empty +import datetime +from typing import Optional +from nbclient.exceptions import ( + CellControlSignal, + CellExecutionComplete, + CellExecutionError, + CellTimeoutError, + DeadKernelError, +) +from . import NbGraderPreprocessor +from jupyter_client.manager import start_new_kernel + +try: + from time import monotonic # Py 3 +except ImportError: + from time import time as monotonic # Py 2 + +######################################################################################### +class InstantiateTests(NbGraderPreprocessor): + tests = None + + autotest_filename = Unicode( + "autotests.yml", + help="The filename where automatic testing code is stored" + ).tag(config=True) + + autotest_delimiter = Unicode( + "AUTOTEST", + help="The delimiter prior to snippets to be autotested" + ).tag(config=True) + + hashed_delimiter = Unicode( + "HASHED", + help="The delimiter prior to an autotest block if snippet results should be protected by a hash function" + ).tag(config=True) + + use_salt = Bool( + True, + help="Whether to add a salt to digested answers" + ).tag(config=True) + + enforce_metadata = Bool( + True, + help=dedent( + """ + Whether or not to complain if cells containing autotest delimiters + are not marked as grade cells. WARNING: disabling this will potentially cause + things to break if you are using the full nbgrader pipeline. ONLY + disable this option if you are only ever planning to use nbgrader + assign. 
+            """
+        )
+    ).tag(config=True)
+
+    comment_strs = Dict(
+        key_trait=Unicode(),
+        value_trait=Unicode(),
+        default_value={
+            'ir': '#',
+            'python': '#',
+            'python3': '#'
+        },
+        help=dedent(
+            """
+            A dictionary mapping each Jupyter kernel's name to the comment string for that kernel.
+            For example, one of the entries in this dictionary is "python" : "#", because # is the
+            comment character in Python.
+            """
+        )
+    ).tag(config=True)
+
+    sanitizers = Dict(
+        key_trait=Unicode(),
+        value_trait=Callable(),
+        default_value={
+            'ir': lambda s: re.sub(r'\[\d+\]\s+', '', s).strip('"').strip("'"),
+            'python': lambda s: s.strip('"').strip("'"),
+            'python3': lambda s: s.strip('"').strip("'")
+        },
+        help=dedent(
+            """
+            A dictionary mapping each Jupyter kernel's name to the function that is used to
+            sanitize the output from the kernel within InstantiateTests.
+            """
+        )
+    ).tag(config=True)
+
+    sanitizer = None
+    kernel_name = None
+    kc = None
+    execute_result = None
+
+    def preprocess(self, nb, resources):
+        # avoid starting the kernel at all/processing the notebook if there are no autotest delimiters
+        for index, cell in enumerate(nb.cells):
+            # look for an autotest delimiter in this cell's source; if we find one, process this notebook
+            # (short-circuit: ignore non-code cells)
+            if (cell.cell_type == 'code') and (self.autotest_delimiter in cell.source):
+                # get the kernel name from the notebook
+                kernel_name = nb.metadata.get("kernelspec", {}).get("name", "")
+                if kernel_name not in self.comment_strs:
+                    raise ValueError(f"Kernel {kernel_name} has not been specified in InstantiateTests.comment_strs")
+                if kernel_name not in self.sanitizers:
+                    raise ValueError(f"Kernel {kernel_name} has not been specified in InstantiateTests.sanitizers")
+                self.log.debug("Found kernel %s", kernel_name)
+                resources["kernel_name"] = kernel_name
+
+                # get the resources path from the notebook
+                resources_path = resources.get('metadata', {}).get('path', None)
+
+                # load the template tests file
+                self.log.debug('Loading template tests file')
+                self._load_test_template_file(resources)
+                self.global_tests_loaded = True
+
+                # set up the sanitizer
+                self.log.debug('Setting sanitizer for kernel %s', kernel_name)
+                self.sanitizer = self.sanitizers[kernel_name]
+
+                # start a kernel of the specified type in the local path of the notebook
+                self.log.debug('Starting client for kernel %s at path %s', kernel_name, resources_path if resources_path is not None else '')
+                km, self.kc = start_new_kernel(kernel_name=kernel_name, cwd=resources_path)
+
+                # run the preprocessor
+                self.log.debug('Running InstantiateTests preprocessor')
+                nb, resources = super(InstantiateTests, self).preprocess(nb, resources)
+
+                # shut down and clean up the kernel
+                self.log.debug('Shutting down / cleaning up kernel')
+                km.shutdown_kernel()
+                self.kc = None
+                self.sanitizer = None
+                self.execute_result = None
+
+                # return the modified notebook
+                return nb, resources
+
+        # no autotest delimiters were found, so just return the notebook unchanged
+        return nb, resources
+
+    def preprocess_cell(self, cell, resources, index):
+        # if it's not a code cell, or if the cell's source is empty, just return
+        if (cell.cell_type != 'code') or (len(cell.source) == 0):
+            return cell, resources
+
+        # determine whether the cell is a grade cell
+        is_grade_flag = utils.is_grade(cell)
+
+        # get the comment string for this language
+        comment_str = self.comment_strs[resources["kernel_name"]]
+
+        # seed the salt generator for this cell
+        # avoid actual random seeds so that release versions are consistent across
+        # calls to nbgrader generate_assignment
+        salt_int = int(hashlib.sha256((cell.source + str(index)).encode('utf-8')).hexdigest(), 16) % 10**6
+
+        # split the code lines into separate strings
+        lines = cell.source.split("\n")
+
+        setup_code_inserted_into_cell = False
+
+        non_autotest_code_lines = []
+
+        # new_lines will store the replacement code after autotest template instantiation
+        new_lines = []
+
+        for line in lines:
+            # if the current line doesn't have the autotest_delimiter or is not a comment
+            # then just append the line to the new cell code and go to the next line
+            if self.autotest_delimiter not in line or line.strip()[:len(comment_str)] != comment_str:
+                new_lines.append(line)
+                non_autotest_code_lines.append(line)
+                continue
+
+            # run all code lines prior to the current line containing the autotest_delimiter
+            self._execute_code_snippet("\n".join(non_autotest_code_lines))
+            non_autotest_code_lines = []
+
+            # there are autotests; we should check that it is a grading cell
+            if not is_grade_flag:
+                if not self.enforce_metadata:
+                    self.log.warning(
+                        "AutoTest region detected in a non-grade cell; "
+                        "please make sure all autotest regions are within "
+                        "'Autograder tests' cells."
+                    )
+                else:
+                    self.log.error(
+                        "AutoTest region detected in a non-grade cell; "
+                        "please make sure all autotest regions are within "
+                        "'Autograder tests' cells."
+                    )
+                    raise Exception("AutoTest region detected in a non-grade cell")
+
+            self.log.debug('AutoTest delimiter found on line. 
Preprocessing...') + + # the first time we run into an autotest delimiter, + # append any setup code to the cell block we're in + + # if the setup_code is successfully obtained from the template file and + # the current cell does not already have the setup code, add the setup_code + if (self.setup_code is not None) and (not setup_code_inserted_into_cell): + new_lines.append(self.setup_code) + setup_code_inserted_into_cell = True + self._execute_code_snippet(self.setup_code) + + # decide whether to use hashing based on whether the self.hashed_delimiter token + # appears in the line before the self.autotest_delimiter token + use_hash = (self.hashed_delimiter in line[:line.find(self.autotest_delimiter)]) + if use_hash: + if self.hash_template is None: + raise ValueError('Found a hashing delimiter, but the hash property has not been set in autotests.yml') + self.log.debug('Hashing delimiter found, using template: %s', self.hash_template) + else: + self.log.debug('Hashing delimiter not found') + + # take everything after the autotest_delimiter as code snippets separated by semicolons + snippets = [snip.strip() for snip in + line[line.find(self.autotest_delimiter) + len(self.autotest_delimiter):].strip(';').split(';')] + + # remove empty snippets + if '' in snippets: + snippets.remove('') + + # print autotest snippets to log + self.log.debug('Found snippets to autotest: ') + for snippet in snippets: + self.log.debug(snippet) + + # generate the test for each snippet + for snippet in snippets: + self.log.debug('Running autotest generation for snippet %s', snippet) + + # create a salt for this test + if use_hash: + salt_int += 1 + salt = hex(salt_int)[2:] + self.log.debug('Using salt: %s', salt) + else: + salt = None + + # get the normalized(/hashed) template tests for this code snippet + self.log.debug( + 'Instantiating normalized%s test templates based on type', ' & hashed' if use_hash else '') + instantiated_tests, test_values, fail_messages = self._instantiate_tests(snippet, salt) + + # add all the lines to the cell + self.log.debug('Inserting test code into cell') + template = j2.Environment(loader=j2.BaseLoader).from_string(self.check_template) + for i in range(len(instantiated_tests)): + check_code = template.render(snippet=instantiated_tests[i], value=test_values[i], + message=fail_messages[i]) + self.log.debug('Test: %s', check_code) + new_lines.append(check_code) + + # add an empty line after this block of test code + new_lines.append('') + + # add the final success code and execute it + if ( + is_grade_flag + and self.global_tests_loaded + and (self.autotest_delimiter in cell.source) + and (self.success_code is not None) + ): + new_lines.append(self.success_code) + non_autotest_code_lines.append(self.success_code) + + # run the trailing non-autotest lines, if any remain + if len(non_autotest_code_lines) > 0: + self._execute_code_snippet("\n".join(non_autotest_code_lines)) + + # replace the cell source + cell.source = "\n".join(new_lines) + + # remove the execution metainfo + cell.pop('execution', None) + + return cell, resources + + # ------------------------------------------------------------------------------------- + def _load_test_template_file(self, resources): + """ + attempts to load the autotests.yml file within the assignment directory. 
If no autotests.yml file exists there,
+        this falls back to the autotests.yml file in the course root directory.
+        """
+        self.log.debug('loading template autotests.yml...')
+        self.log.debug('kernel_name: %s', resources["kernel_name"])
+        try:
+            with open(os.path.join(resources['metadata']['path'], self.autotest_filename), 'r') as tests_file:
+                tests = yaml.safe_load(tests_file)
+                self.log.debug(tests)
+
+        except FileNotFoundError:
+            # if there is no tests file in the assignment directory, try to load the
+            # default tests file from the course root directory
+            self.log.warning(
+                'No autotests.yml file found in the assignment directory. Loading the default autotests.yml file in the course root directory')
+            try:
+                with open(os.path.join(self.autotest_filename), 'r') as tests_file:
+                    tests = yaml.safe_load(tests_file)
+            except FileNotFoundError:
+                # if there is not even a default tests file, re-raise the FileNotFoundError
+                self.log.error('No autotests.yml file found, but there were autotest directives found in the notebook.')
+                raise
+            except yaml.parser.ParserError as e:
+                self.log.error('autotests.yml contains invalid YAML code.')
+                self.log.error(e.msg)
+                raise
+
+        except yaml.parser.ParserError as e:
+            self.log.error('autotests.yml contains invalid YAML code.')
+            self.log.error(e.msg)
+            raise
+
+        # get the kernel-specific data
+        tests = tests[resources["kernel_name"]]
+
+        # get the test templates
+        self.test_templates_by_type = tests['templates']
+
+        # get the test dispatch code template
+        self.dispatch_template = tests['dispatch']
+
+        # get the success message template
+        self.success_code = tests.get('success', None)
+
+        # get the hash code template
+        self.hash_template = tests.get('hash', None)
+
+        # get the check code template
+        self.check_template = tests['check']
+
+        # get the normalize code template
+        self.normalize_template = tests['normalize']
+
+        # get the setup code if it's there
+        self.setup_code = tests.get('setup', None)
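+    # -------------------------------------------------------------------------------------------------
+    # A rough sketch of the data flow implemented below (assuming the example autotests.yml from the
+    # user guide and an int-valued snippet foo(3)):
+    #
+    #   dispatch code:  type(foo(3))      -> run in the kernel -> "int" selects the int templates
+    #   test template:  type({{snippet}})
+    #   normalized:     str(type({{snippet}}))   (optionally wrapped in the hash template)
+    #   instantiated:   str(type(foo(3))) -> run in the kernel -> expected value "<class 'int'>"
+    #
+    # preprocess_cell() then renders the check template with each (instantiated test, expected value,
+    # fail message) triple to produce the assert statements inserted into the student notebook.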
+        # normalize the templates
+        normalized_templs = []
+        for templ in test_templs:
+            template = j2.Environment(loader=j2.BaseLoader).from_string(self.normalize_template)
+            normalized_templs.append(template.render(snippet=templ))
+
+        # hashify the templates when a salt was provided
+        processed_templs = []
+        if salt is not None:
+            for templ in normalized_templs:
+                template = j2.Environment(loader=j2.BaseLoader).from_string(self.hash_template)
+                processed_templs.append(template.render(snippet=templ, salt=salt))
+        else:
+            processed_templs = normalized_templs
+
+        # instantiate and evaluate the tests
+        instantiated_tests = []
+        test_values = []
+        for templ in processed_templs:
+            # instantiate the template snippet
+            template = j2.Environment(loader=j2.BaseLoader).from_string(templ)
+            instantiated_test = template.render(snippet=snippet)
+            # run the instantiated template code
+            test_value = self._execute_code_snippet(instantiated_test)
+            instantiated_tests.append(instantiated_test)
+            test_values.append(test_value)
+
+        return instantiated_tests, test_values, rendered_fail_msgs
+
+    def _execute_code_snippet(self, code):
+        self.log.debug("Executing code:\n%s", code)
+        self.kc.execute_interactive(code, output_hook=self._execute_code_snippet_output_hook)
+        res = self.execute_result
+        self.execute_result = None
+        self.log.debug("Result:\n%s", res)
+        return res
+
+    def _execute_code_snippet_output_hook(self, msg: t.Dict[str, t.Any]) -> None:
+        msg_type = msg["header"]["msg_type"]
+        content = msg["content"]
+        if msg_type == "stream":
+            # ignore stdout/stderr stream output while instantiating tests
+            pass
+        elif msg_type in ("display_data", "update_display_data", "execute_result"):
+            self.execute_result = self.sanitizer(content["data"]["text/plain"])
+        elif msg_type == "error":
+            self.log.error("Runtime error from the kernel: \n%s\n%s\n%s", content['ename'], content['evalue'], content['traceback'])
+            raise CellExecutionError(content['traceback'], content['ename'], content['evalue'])
+        return
diff --git a/nbgrader/tests/__init__.py b/nbgrader/tests/__init__.py
index 5f57d10ff..5239d4cb6 100644
--- a/nbgrader/tests/__init__.py
+++ b/nbgrader/tests/__init__.py
@@ -251,3 +251,26 @@ def get_free_ports(n):
     for s in sockets:
         s.close()
     return ports
+
+
+def create_autotest_solution_cell():
+    source = """
+    answer = 'answer'
+    """
+    cell = new_code_cell(source=source)
+    return cell
+
+
+def create_autotest_test_cell():
+    source = """
+    ### AUTOTEST answer
+    """
+    cell = new_code_cell(source=source)
+    return cell
+
+
+def create_file_loader_cell(filename):
+    source = f"""
+    with open('{filename}', 'r') as f:
+        tmp = f.read()
+    """
+    cell = create_regular_cell(source, 'code')
+    return cell
diff --git a/nbgrader/tests/api/__init__.py b/nbgrader/tests/api/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/nbgrader/tests/api/test_gradebook.py b/nbgrader/tests/api/test_gradebook.py
deleted file mode 100644
index 0b80ae262..000000000
--- a/nbgrader/tests/api/test_gradebook.py
+++ /dev/null
@@ -1,1312 +0,0 @@
-import pytest
-
-from datetime import datetime, timedelta
-from ... import api
-from ...
import utils -from ...api import InvalidEntry, MissingEntry -from _pytest.fixtures import SubRequest -from nbgrader.api import Gradebook - - -@pytest.fixture -def gradebook(request: SubRequest) -> Gradebook: - gb = api.Gradebook("sqlite:///:memory:") - - def fin() -> None: - gb.close() - request.addfinalizer(fin) - return gb - - -@pytest.fixture -def assignment(gradebook: Gradebook) -> Gradebook: - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code') - gradebook.add_grade_cell('test2', 'p1', 'foo', max_score=2, cell_type='markdown') - gradebook.add_solution_cell('solution1', 'p1', 'foo') - gradebook.add_solution_cell('test2', 'p1', 'foo') - gradebook.add_source_cell('test1', 'p1', 'foo', cell_type='code') - gradebook.add_source_cell('test2', 'p1', 'foo', cell_type='markdown') - gradebook.add_source_cell('solution1', 'p1', 'foo', cell_type='code') - return gradebook - - -def makeAssignments(gb, na, nn, ns, grades=[1, 2, 10, 20, 100, 200]): - for si in range(ns): - sname = "s{0}".format(si + 1) - gb.add_student(sname) - for ia in range(na): - aname = 'a{0}'.format(ia + 1) - a = gb.add_assignment(aname) - for ni in range(nn): - nname = 'n{0}'.format(ni + 1) - n = gb.add_notebook(nname, aname) - gb.add_solution_cell('solution1', nname, aname) - gb.add_solution_cell('solution2', nname, aname) - gb.add_source_cell('source1', nname, aname, cell_type='code') - gb.add_source_cell('source2', nname, aname, cell_type='markdown') - gb.add_source_cell('solution1', nname, aname, cell_type='code') - gb.add_grade_cell('grade_code1', nname, aname, cell_type='code', max_score=2) - gb.add_grade_cell('grade_code2', nname, aname, cell_type='code', max_score=3) - gb.add_grade_cell('grade_written1', nname, aname, cell_type='markdown', max_score=20) - gb.add_grade_cell('grade_written2', nname, aname, cell_type='markdown', max_score=30) - gb.add_task_cell('task1', nname, aname, cell_type='markdown', max_score=200) - gb.add_task_cell('task2', nname, aname, cell_type='markdown', max_score=300) - for si in range(ns): - sname = "s{0}".format(si + 1) - sub = gb.add_submission(aname, sname) - sub.flagged = False - for ni in range(nn): - nname = 'n{0}'.format(ni + 1) - g1 = gb.find_grade("grade_code1", nname, aname, sname) - g2 = gb.find_grade("grade_code2", nname, aname, sname) - g3 = gb.find_grade("grade_written1", nname, aname, sname) - g4 = gb.find_grade("grade_written2", nname, aname, sname) - g5 = gb.find_grade("task1", nname, aname, sname) - g6 = gb.find_grade("task2", nname, aname, sname) - - (g1.manual_score, g2.manual_score, g3.manual_score, g4.manual_score, - g5.manual_score, g6.manual_score) = grades - gb.db.commit() - - return gb - - -@pytest.fixture -def FiveStudents(gradebook): - return makeAssignments(gradebook, 1, 1, 5) - - -@pytest.fixture -def FiveNotebooks(gradebook): - return makeAssignments(gradebook, 1, 5, 1) - - -@pytest.fixture -def FiveAssignments(gradebook): - return makeAssignments(gradebook, 5, 1, 1) - - -@pytest.fixture -def assignmentWithTask(gradebook: Gradebook) -> Gradebook: - for f in ['foo', 'foo2']: - gradebook.add_assignment(f) - for n in ['p1', 'p2']: - gradebook.add_notebook(n, f) - gradebook.add_solution_cell('solution1', n, f) - gradebook.add_solution_cell('test2', n, f) - gradebook.add_source_cell('test1', n, f, cell_type='code') - gradebook.add_source_cell('test2', n, f, cell_type='markdown') - gradebook.add_source_cell('solution1', n, f, cell_type='code') - 
gradebook.add_grade_cell('grade_code1', n, f, cell_type='code', max_score=1) - gradebook.add_grade_cell('grade_code2', n, f, cell_type='code', max_score=10) - gradebook.add_grade_cell('grade_written1', n, f, cell_type='markdown', max_score=1) - gradebook.add_grade_cell('grade_written2', n, f, cell_type='markdown', max_score=10) - gradebook.add_task_cell('task1', n, f, cell_type='markdown', max_score=2) - gradebook.add_task_cell('task2', n, f, cell_type='markdown', max_score=20) - - return gradebook - - -@pytest.fixture -def assignmentWithSubmissionNoMarks(assignmentWithTask: Gradebook) -> Gradebook: - assignmentWithTask.add_student('hacker123') - assignmentWithTask.add_student('bitdiddle') - assignmentWithTask.add_student('louisreasoner') - s1 = assignmentWithTask.add_submission('foo', 'hacker123') - s2 = assignmentWithTask.add_submission('foo', 'bitdiddle') - s1.flagged = True - s2.flagged = False - assignmentWithTask.db.commit() - return assignmentWithTask - -possiblegrades = [ - [0.5, 2, 3, 5, 1, 7, 2, 1], - [0.1, 4, 0.25, 1, 7, 0.0, 1, 1], - [0] * 8, - [2] * 8, - [0.25] * 8, -] - - -@pytest.fixture(params=possiblegrades) -def assignmentWithSubmissionWithMarks(assignmentWithSubmissionNoMarks: Gradebook, request: SubRequest) -> Gradebook: - a = assignmentWithSubmissionNoMarks - g1 = a.find_grade("grade_code1", "p1", "foo", "bitdiddle") - g2 = a.find_grade("grade_code2", "p1", "foo", "bitdiddle") - - g3 = a.find_grade("grade_written1", "p1", "foo", "hacker123") - g4 = a.find_grade("grade_written2", "p1", "foo", "hacker123") - - g5 = a.find_grade("task1", "p1", "foo", "bitdiddle") - g6 = a.find_grade("task2", "p1", "foo", "bitdiddle") - g7 = a.find_grade("task1", "p1", "foo", "hacker123") - g8 = a.find_grade("task2", "p1", "foo", "hacker123") - - (g1.manual_score, g2.manual_score, g3.manual_score, g4.manual_score, g5.manual_score, - g6.manual_score, g7.manual_score, g8.manual_score) = request.param - a.db.commit() - a.usedgrades = request.param - a.usedgrades_code = request.param[:2] - a.usedgrades_written = request.param[2:4] - a.usedgrades_task = request.param[4:] - - return a - - -@pytest.fixture -def assignmentManyStudents(assignmentWithTask, request): - a = assignmentWithTask - for s in range(50): - sname = 's{0}'.format(s) - a.add_student(sname) - sub = a.add_submission('foo', sname) - g1 = a.find_grade("grade_code1", "p1", "foo", sname) - g2 = a.find_grade("grade_written1", "p1", "foo", sname) - g3 = a.find_grade("task1", "p1", "foo", sname) - g4 = a.find_grade("task2", "p1", "foo", sname) - - ( - g1.manual_score, - g2.manual_score, - g3.manual_score, - g4.manual_score) = (1, 2, 3, 4) - a.db.commit() - - return a - - -@pytest.fixture -def assignmentTwoStudents(assignmentWithTask, request): - a = assignmentWithTask - for s in range(50): - sname = 's{0}'.format(s) - a.add_student(sname) - sub = a.add_submission('foo', sname) - g1 = a.find_grade("grade_code1", "p1", "foo", sname) - g2 = a.find_grade("grade_written1", "p1", "foo", sname) - g3 = a.find_grade("task1", "p1", "foo", sname) - g4 = a.find_grade("task2", "p1", "foo", sname) - - ( - g1.manual_score, - g2.manual_score, - g3.manual_score, - g4.manual_score) = (1, 2, 3, 4) - a.db.commit() - - return a - - -def test_init(gradebook: Gradebook) -> None: - assert gradebook.students == [] - assert gradebook.assignments == [] - - -# Test students - -def test_add_student(gradebook): - s = gradebook.add_student('12345') - assert s.id == '12345' - assert gradebook.students == [s] - - # try adding a duplicate student - with 
pytest.raises(InvalidEntry): - gradebook.add_student('12345') - - # try adding a student with arguments - s = gradebook.add_student('6789', last_name="Bar", first_name="Foo", email="foo@bar.com") - assert s.id == '6789' - assert s.last_name == "Bar" - assert s.first_name == "Foo" - assert s.email == "foo@bar.com" - - -def test_add_duplicate_student(gradebook): - # we also need this test because this will cause an IntegrityError - # under the hood rather than a FlushError - gradebook.add_student('12345') - with pytest.raises(InvalidEntry): - gradebook.add_student('12345') - - -def test_find_student(gradebook): - s1 = gradebook.add_student('12345') - assert gradebook.find_student('12345') == s1 - - s2 = gradebook.add_student('abcd') - assert gradebook.find_student('12345') == s1 - assert gradebook.find_student('abcd') == s2 - - -def test_find_nonexistant_student(gradebook): - with pytest.raises(MissingEntry): - gradebook.find_student('12345') - - -def test_remove_student(assignment): - assignment.add_student('hacker123') - assignment.add_submission('foo', 'hacker123') - - assignment.remove_student('hacker123') - - with pytest.raises(MissingEntry): - assignment.find_submission('foo', 'hacker123') - with pytest.raises(MissingEntry): - assignment.find_student('hacker123') - - -def test_update_or_create_student(gradebook): - # first test creating it - s1 = gradebook.update_or_create_student('hacker123') - assert gradebook.find_student('hacker123') == s1 - assert s1.first_name is None - - # now test finding/updating it - s2 = gradebook.update_or_create_student('hacker123', first_name='Alyssa') - assert s1 == s2 - assert s2.first_name == 'Alyssa' - - -# Test assignments - -def test_add_assignment(gradebook): - a = gradebook.add_assignment('foo') - assert a.name == 'foo' - assert gradebook.assignments == [a] - - # try adding a duplicate assignment - with pytest.raises(InvalidEntry): - gradebook.add_assignment('foo') - - # try adding an assignment with arguments - now = datetime.utcnow() - a = gradebook.add_assignment('bar', duedate=now) - assert a.name == 'bar' - assert a.duedate == now - - # try adding with a string timestamp - a = gradebook.add_assignment('baz', duedate=now.isoformat()) - assert a.name == 'baz' - assert a.duedate == now - - -def test_add_duplicate_assignment(gradebook): - gradebook.add_assignment('foo') - with pytest.raises(InvalidEntry): - gradebook.add_assignment('foo') - - -def test_find_assignment(gradebook): - a1 = gradebook.add_assignment('foo') - assert gradebook.find_assignment('foo') == a1 - - a2 = gradebook.add_assignment('bar') - assert gradebook.find_assignment('foo') == a1 - assert gradebook.find_assignment('bar') == a2 - - -def test_find_nonexistant_assignment(gradebook): - with pytest.raises(MissingEntry): - gradebook.find_assignment('foo') - - -def test_remove_assignment(assignment): - assignment.add_student('hacker123') - assignment.add_submission('foo', 'hacker123') - - notebooks = assignment.find_assignment('foo').notebooks - grade_cells = [x for nb in notebooks for x in nb.grade_cells] - solution_cells = [x for nb in notebooks for x in nb.solution_cells] - source_cells = [x for nb in notebooks for x in nb.source_cells] - - assignment.remove_assignment('foo') - - for nb in notebooks: - assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == [] - for grade_cell in grade_cells: - assert assignment.db.query(api.GradeCell).filter(api.GradeCell.id == grade_cell.id).all() == [] - for solution_cell in solution_cells: - 
assert assignment.db.query(api.SolutionCell).filter(api.SolutionCell.id == solution_cell.id).all() == [] - for source_cell in source_cells: - assert assignment.db.query(api.SourceCell).filter(api.SourceCell.id == source_cell.id).all() == [] - - with pytest.raises(MissingEntry): - assignment.find_assignment('foo') - - assert assignment.find_student('hacker123').submissions == [] - - -def test_update_or_create_assignment(gradebook): - # first test creating it - a1 = gradebook.update_or_create_assignment('foo') - assert gradebook.find_assignment('foo') == a1 - assert a1.duedate is None - - # now test finding/updating it - a2 = gradebook.update_or_create_assignment('foo', duedate="2015-02-02 14:58:23.948203 America/Los_Angeles") - assert a1 == a2 - assert a2.duedate == utils.parse_utc("2015-02-02 14:58:23.948203 America/Los_Angeles") - -# Test notebooks - - -def test_add_notebook(gradebook): - a = gradebook.add_assignment('foo') - n = gradebook.add_notebook('p1', 'foo') - assert n.name == 'p1' - assert n.assignment == a - assert a.notebooks == [n] - - # try adding a duplicate assignment - with pytest.raises(InvalidEntry): - gradebook.add_notebook('p1', 'foo') - - -def test_add_duplicate_notebook(gradebook): - # it should be ok to add a notebook with the same name, as long as - # it's for different assignments - gradebook.add_assignment('foo') - gradebook.add_assignment('bar') - n1 = gradebook.add_notebook('p1', 'foo') - n2 = gradebook.add_notebook('p1', 'bar') - assert n1.id != n2.id - - # but not ok to add a notebook with the same name for the same assignment - with pytest.raises(InvalidEntry): - gradebook.add_notebook('p1', 'foo') - - -def test_find_notebook(gradebook): - gradebook.add_assignment('foo') - n1 = gradebook.add_notebook('p1', 'foo') - assert gradebook.find_notebook('p1', 'foo') == n1 - - n2 = gradebook.add_notebook('p2', 'foo') - assert gradebook.find_notebook('p1', 'foo') == n1 - assert gradebook.find_notebook('p2', 'foo') == n2 - - -def test_find_nonexistant_notebook(gradebook: Gradebook) -> None: - # check that it doesn't find it when there is nothing in the db - with pytest.raises(MissingEntry): - gradebook.find_notebook('p1', 'foo') - - # check that it doesn't find it even if the assignment exists - gradebook.add_assignment('foo') - with pytest.raises(MissingEntry): - gradebook.find_notebook('p1', 'foo') - - -def test_update_or_create_notebook(gradebook): - # first test creating it - gradebook.add_assignment('foo') - n1 = gradebook.update_or_create_notebook('p1', 'foo') - assert gradebook.find_notebook('p1', 'foo') == n1 - - # now test finding/updating it - n2 = gradebook.update_or_create_notebook('p1', 'foo') - assert n1 == n2 - - -def test_remove_notebook(assignment): - assignment.add_student('hacker123') - assignment.add_submission('foo', 'hacker123') - - notebooks = assignment.find_assignment('foo').notebooks - - for nb in notebooks: - grade_cells = [x for x in nb.grade_cells] - solution_cells = [x for x in nb.solution_cells] - source_cells = [x for x in nb.source_cells] - - assignment.remove_notebook(nb.name, 'foo') - assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == [] - - for grade_cell in grade_cells: - assert assignment.db.query(api.GradeCell).filter(api.GradeCell.id == grade_cell.id).all() == [] - for solution_cell in solution_cells: - assert assignment.db.query(api.SolutionCell).filter(api.SolutionCell.id == solution_cell.id).all() == [] - for source_cell in source_cells: - assert 
assignment.db.query(api.SourceCell).filter(api.SourceCell.id == source_cell.id).all() == [] - - with pytest.raises(MissingEntry): - assignment.find_notebook(nb.name, 'foo') - - -def test_course_id_constructor(): - gb = api.Gradebook("sqlite:///:memory:") - assert gb.db.query(api.Course).first().id == "default_course" - -def test_course_id_multiple_assignments(): - course_one = "course-one" - course_two = "course-two" - - gb_one = api.Gradebook("sqlite:///:memory:", course_id=course_one) - gb_two = api.Gradebook("sqlite:///:memory:", course_id=course_two) - - assignment_one = gb_one.add_assignment('foo') - assignent_two = gb_two.add_assignment('bar') - - assert assignment_one.course_id == course_one - assert assignent_two.course_id == course_two - - assert len(gb_one.db.query(api.Course).all()) == 1 - assert gb_one.db.query(api.Course).first().id == course_one - assert gb_one.db.query(api.Assignment).first().course_id == course_one - assert gb_one.db.query(api.Assignment).first().course == gb_one.db.query(api.Course).first() - - assert len(gb_two.db.query(api.Course).all()) == 1 - assert gb_two.db.query(api.Course).first().id == course_two - assert gb_two.db.query(api.Assignment).first().course_id == course_two - assert gb_two.db.query(api.Assignment).first().course == gb_two.db.query(api.Course).first() - -# Test grade cells - -def test_add_grade_cell(gradebook): - gradebook.add_assignment('foo') - n = gradebook.add_notebook('p1', 'foo') - gc = gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='markdown') - assert gc.name == 'test1' - assert gc.max_score == 2 - assert gc.cell_type == 'markdown' - assert n.grade_cells == [gc] - assert gc.notebook == n - - -def test_add_grade_cell_with_args(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - gc = gradebook.add_grade_cell( - 'test1', 'p1', 'foo', - max_score=3, cell_type="code") - assert gc.name == 'test1' - assert gc.max_score == 3 - assert gc.cell_type == "code" - - -def test_create_invalid_grade_cell(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - with pytest.raises(InvalidEntry): - gradebook.add_grade_cell( - 'test1', 'p1', 'foo', - max_score=3, cell_type="something") - - -def test_add_duplicate_grade_cell(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code') - with pytest.raises(InvalidEntry): - gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='markdown') - - -def test_find_grade_cell(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - gc1 = gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code') - assert gradebook.find_grade_cell('test1', 'p1', 'foo') == gc1 - - gc2 = gradebook.add_grade_cell('test2', 'p1', 'foo', max_score=2, cell_type='code') - assert gradebook.find_grade_cell('test1', 'p1', 'foo') == gc1 - assert gradebook.find_grade_cell('test2', 'p1', 'foo') == gc2 - - -def test_find_nonexistant_grade_cell(gradebook): - with pytest.raises(MissingEntry): - gradebook.find_grade_cell('test1', 'p1', 'foo') - - gradebook.add_assignment('foo') - with pytest.raises(MissingEntry): - gradebook.find_grade_cell('test1', 'p1', 'foo') - - gradebook.add_notebook('p1', 'foo') - with pytest.raises(MissingEntry): - gradebook.find_grade_cell('test1', 'p1', 'foo') - - -def test_update_or_create_grade_cell(gradebook): - # first test creating it - gradebook.add_assignment('foo') 
- gradebook.add_notebook('p1', 'foo') - gc1 = gradebook.update_or_create_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='code') - assert gc1.max_score == 2 - assert gc1.cell_type == 'code' - assert gradebook.find_grade_cell('test1', 'p1', 'foo') == gc1 - - # now test finding/updating it - gc2 = gradebook.update_or_create_grade_cell('test1', 'p1', 'foo', max_score=3) - assert gc1 == gc2 - assert gc1.max_score == 3 - assert gc1.cell_type == 'code' - - -# Test solution cells - -def test_add_solution_cell(gradebook): - gradebook.add_assignment('foo') - n = gradebook.add_notebook('p1', 'foo') - sc = gradebook.add_solution_cell('test1', 'p1', 'foo') - assert sc.name == 'test1' - assert n.solution_cells == [sc] - assert sc.notebook == n - - -def test_add_duplicate_solution_cell(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - gradebook.add_solution_cell('test1', 'p1', 'foo') - with pytest.raises(InvalidEntry): - gradebook.add_solution_cell('test1', 'p1', 'foo') - - -def test_find_solution_cell(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - sc1 = gradebook.add_solution_cell('test1', 'p1', 'foo') - assert gradebook.find_solution_cell('test1', 'p1', 'foo') == sc1 - - sc2 = gradebook.add_solution_cell('test2', 'p1', 'foo') - assert gradebook.find_solution_cell('test1', 'p1', 'foo') == sc1 - assert gradebook.find_solution_cell('test2', 'p1', 'foo') == sc2 - - -def test_find_nonexistant_solution_cell(gradebook): - with pytest.raises(MissingEntry): - gradebook.find_solution_cell('test1', 'p1', 'foo') - - gradebook.add_assignment('foo') - with pytest.raises(MissingEntry): - gradebook.find_solution_cell('test1', 'p1', 'foo') - - gradebook.add_notebook('p1', 'foo') - with pytest.raises(MissingEntry): - gradebook.find_solution_cell('test1', 'p1', 'foo') - - -def test_update_or_create_solution_cell(gradebook: Gradebook) -> None: - # first test creating it - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - sc1 = gradebook.update_or_create_solution_cell('test1', 'p1', 'foo') - assert gradebook.find_solution_cell('test1', 'p1', 'foo') == sc1 - - # now test finding/updating it - sc2 = gradebook.update_or_create_solution_cell('test1', 'p1', 'foo') - assert sc1 == sc2 - - -# Test source cells - -def test_add_source_cell(gradebook): - gradebook.add_assignment('foo') - n = gradebook.add_notebook('p1', 'foo') - sc = gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code") - assert sc.name == 'test1' - assert sc.cell_type == 'code' - assert n.source_cells == [sc] - assert sc.notebook == n - - -def test_add_source_cell_with_args(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - sc = gradebook.add_source_cell( - 'test1', 'p1', 'foo', - source="blah blah blah", - cell_type="code", checksum="abcde") - assert sc.name == 'test1' - assert sc.source == "blah blah blah" - assert sc.cell_type == "code" - assert sc.checksum == "abcde" - - -def test_create_invalid_source_cell(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - with pytest.raises(InvalidEntry): - gradebook.add_source_cell( - 'test1', 'p1', 'foo', - source="blah blah blah", - cell_type="something", checksum="abcde") - - -def test_add_duplicate_source_cell(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code") - with pytest.raises(InvalidEntry): - gradebook.add_source_cell('test1', 'p1', 
'foo', cell_type="code") - - -def test_find_source_cell(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - sc1 = gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code") - assert gradebook.find_source_cell('test1', 'p1', 'foo') == sc1 - - sc2 = gradebook.add_source_cell('test2', 'p1', 'foo', cell_type="code") - assert gradebook.find_source_cell('test1', 'p1', 'foo') == sc1 - assert gradebook.find_source_cell('test2', 'p1', 'foo') == sc2 - - -def test_find_nonexistant_source_cell(gradebook): - with pytest.raises(MissingEntry): - gradebook.find_source_cell('test1', 'p1', 'foo') - - gradebook.add_assignment('foo') - with pytest.raises(MissingEntry): - gradebook.find_source_cell('test1', 'p1', 'foo') - - gradebook.add_notebook('p1', 'foo') - with pytest.raises(MissingEntry): - gradebook.find_source_cell('test1', 'p1', 'foo') - - -def test_update_or_create_source_cell(gradebook): - # first test creating it - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - sc1 = gradebook.update_or_create_source_cell('test1', 'p1', 'foo', cell_type='code') - assert sc1.cell_type == 'code' - assert gradebook.find_source_cell('test1', 'p1', 'foo') == sc1 - - # now test finding/updating it - assert sc1.checksum is None - sc2 = gradebook.update_or_create_source_cell('test1', 'p1', 'foo', checksum="123456") - assert sc1 == sc2 - assert sc1.cell_type == 'code' - assert sc1.checksum == "123456" - - -# Test submissions - -def test_add_submission(assignment): - assignment.add_student('hacker123') - assignment.add_student('bitdiddle') - s1 = assignment.add_submission('foo', 'hacker123') - s2 = assignment.add_submission('foo', 'bitdiddle') - - assert assignment.assignment_submissions('foo') == [s2, s1] - assert assignment.student_submissions('hacker123') == [s1] - assert assignment.student_submissions('bitdiddle') == [s2] - assert assignment.find_submission('foo', 'hacker123') == s1 - assert assignment.find_submission('foo', 'bitdiddle') == s2 - - -def test_add_duplicate_submission(assignment): - assignment.add_student('hacker123') - assignment.add_submission('foo', 'hacker123') - with pytest.raises(InvalidEntry): - assignment.add_submission('foo', 'hacker123') - - -def test_remove_submission(assignment): - assignment.add_student('hacker123') - assignment.add_submission('foo', 'hacker123') - - submission = assignment.find_submission('foo', 'hacker123') - notebooks = submission.notebooks - grades = [x for nb in notebooks for x in nb.grades] - comments = [x for nb in notebooks for x in nb.comments] - - assignment.remove_submission('foo', 'hacker123') - - for nb in notebooks: - assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == [] - for grade in grades: - assert assignment.db.query(api.Grade).filter(api.Grade.id == grade.id).all() == [] - for comment in comments: - assert assignment.db.query(api.Comment).filter(api.Comment.id == comment.id).all() == [] - - with pytest.raises(MissingEntry): - assignment.find_submission('foo', 'hacker123') - - -def test_update_or_create_submission(assignment): - assignment.add_student('hacker123') - s1 = assignment.update_or_create_submission('foo', 'hacker123') - assert s1.timestamp is None - - s2 = assignment.update_or_create_submission('foo', 'hacker123', timestamp="2015-02-02 14:58:23.948203 America/Los_Angeles") - assert s1 == s2 - assert s2.timestamp == utils.parse_utc("2015-02-02 14:58:23.948203 America/Los_Angeles") - - -def test_find_submission_notebook(assignment): - 
assignment.add_student('hacker123') - s = assignment.add_submission('foo', 'hacker123') - n1, = s.notebooks - - with pytest.raises(MissingEntry): - assignment.find_submission_notebook('p2', 'foo', 'hacker123') - - n2 = assignment.find_submission_notebook('p1', 'foo', 'hacker123') - assert n1 == n2 - - -def test_find_submission_notebook_by_id(assignment): - assignment.add_student('hacker123') - s = assignment.add_submission('foo', 'hacker123') - n1, = s.notebooks - - with pytest.raises(MissingEntry): - assignment.find_submission_notebook_by_id('12345') - - n2 = assignment.find_submission_notebook_by_id(n1.id) - assert n1 == n2 - - -def test_remove_submission_notebook(assignment): - assignment.add_student('hacker123') - assignment.add_submission('foo', 'hacker123') - - submission = assignment.find_submission('foo', 'hacker123') - notebooks = submission.notebooks - - for nb in notebooks: - grades = [x for x in nb.grades] - comments = [x for x in nb.comments] - - assignment.remove_submission_notebook(nb.name, 'foo', 'hacker123') - assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == [] - - for grade in grades: - assert assignment.db.query(api.Grade).filter(api.Grade.id == grade.id).all() == [] - for comment in comments: - assert assignment.db.query(api.Comment).filter(api.Comment.id == comment.id).all() == [] - - with pytest.raises(MissingEntry): - assignment.find_submission_notebook(nb.name, 'foo', 'hacker123') - - -def test_find_grade(assignment): - assignment.add_student('hacker123') - s = assignment.add_submission('foo', 'hacker123') - n1, = s.notebooks - grades = n1.grades - - for g1 in grades: - g2 = assignment.find_grade(g1.name, 'p1', 'foo', 'hacker123') - assert g1 == g2 - - with pytest.raises(MissingEntry): - assignment.find_grade('asdf', 'p1', 'foo', 'hacker123') - - -def test_find_grade_by_id(assignment): - assignment.add_student('hacker123') - s = assignment.add_submission('foo', 'hacker123') - n1, = s.notebooks - grades = n1.grades - - for g1 in grades: - g2 = assignment.find_grade_by_id(g1.id) - assert g1 == g2 - - with pytest.raises(MissingEntry): - assignment.find_grade_by_id('12345') - - -def test_find_comment(assignment): - assignment.add_student('hacker123') - s = assignment.add_submission('foo', 'hacker123') - n1, = s.notebooks - comments = n1.comments - - for c1 in comments: - c2 = assignment.find_comment(c1.name, 'p1', 'foo', 'hacker123') - assert c1 == c2 - - with pytest.raises(MissingEntry): - assignment.find_comment('asdf', 'p1', 'foo', 'hacker123') - - -def test_find_comment_by_id(assignment): - assignment.add_student('hacker123') - s = assignment.add_submission('foo', 'hacker123') - n1, = s.notebooks - comments = n1.comments - - for c1 in comments: - c2 = assignment.find_comment_by_id(c1.id) - assert c1 == c2 - - with pytest.raises(MissingEntry): - assignment.find_comment_by_id('12345') - - -# Test average scores - -def test_average_assignment_score(assignment): - assert assignment.average_assignment_score('foo') == 0.0 - assert assignment.average_assignment_code_score('foo') == 0.0 - assert assignment.average_assignment_written_score('foo') == 0.0 - - assignment.add_student('hacker123') - assignment.add_student('bitdiddle') - assignment.add_submission('foo', 'hacker123') - assignment.add_submission('foo', 'bitdiddle') - - assert assignment.average_assignment_score('foo') == 0.0 - assert assignment.average_assignment_code_score('foo') == 0.0 - assert assignment.average_assignment_written_score('foo') == 0.0 - - g1 = 
assignment.find_grade("test1", "p1", "foo", "hacker123") - g2 = assignment.find_grade("test2", "p1", "foo", "hacker123") - g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle") - g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle") - - g1.manual_score = 0.5 - g2.manual_score = 2 - g3.manual_score = 1 - g4.manual_score = 1 - assignment.db.commit() - - assert assignment.average_assignment_score('foo') == 2.25 - assert assignment.average_assignment_code_score('foo') == 0.75 - assert assignment.average_assignment_written_score('foo') == 1.5 - - -def test_average_notebook_score(assignment: Gradebook) -> None: - assert assignment.average_notebook_score('p1', 'foo') == 0 - assert assignment.average_notebook_code_score('p1', 'foo') == 0 - assert assignment.average_notebook_written_score('p1', 'foo') == 0 - - assignment.add_student('hacker123') - assignment.add_student('bitdiddle') - assignment.add_submission('foo', 'hacker123') - assignment.add_submission('foo', 'bitdiddle') - - assert assignment.average_notebook_score('p1', 'foo') == 0.0 - assert assignment.average_notebook_code_score('p1', 'foo') == 0.0 - assert assignment.average_notebook_written_score('p1', 'foo') == 0.0 - - g1 = assignment.find_grade("test1", "p1", "foo", "hacker123") - g2 = assignment.find_grade("test2", "p1", "foo", "hacker123") - g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle") - g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle") - - g1.manual_score = 0.5 - g2.manual_score = 2 - g3.manual_score = 1 - g4.manual_score = 1 - assignment.db.commit() - - assert assignment.average_notebook_score('p1', 'foo') == 2.25 - assert assignment.average_notebook_code_score('p1', 'foo') == 0.75 - assert assignment.average_notebook_written_score('p1', 'foo') == 1.5 - - -# Test mass dictionary queries - -def test_student_dicts(assignment): - assignment.add_student('hacker123') - assignment.add_student('bitdiddle') - assignment.add_student('louisreasoner') - assignment.add_submission('foo', 'hacker123') - assignment.add_submission('foo', 'bitdiddle') - - g1 = assignment.find_grade("test1", "p1", "foo", "hacker123") - g2 = assignment.find_grade("test2", "p1", "foo", "hacker123") - g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle") - g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle") - - g1.manual_score = 0.5 - g2.manual_score = 2 - g3.manual_score = 1 - g4.manual_score = 1 - assignment.db.commit() - - students = assignment.student_dicts() - a = sorted(students, key=lambda x: x["id"]) - b = sorted([x.to_dict() for x in assignment.students], key=lambda x: x["id"]) - assert a == b - - -def test_student_dicts_zero_points(gradebook): - gradebook.add_assignment("ps1") - s = gradebook.add_student("1234") - assert gradebook.student_dicts() == [s.to_dict()] - - -def test_notebook_submission_dicts(assignment): - assignment.add_student('hacker123') - assignment.add_student('bitdiddle') - s1 = assignment.add_submission('foo', 'hacker123') - s2 = assignment.add_submission('foo', 'bitdiddle') - s1.flagged = True - s2.flagged = False - - g1 = assignment.find_grade("test1", "p1", "foo", "hacker123") - g2 = assignment.find_grade("test2", "p1", "foo", "hacker123") - g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle") - g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle") - - g1.manual_score = 0.5 - g2.manual_score = 2 - g3.manual_score = 1 - g4.manual_score = 1 - assignment.db.commit() - - notebook = assignment.find_notebook("p1", "foo") - submissions = 
assignment.notebook_submission_dicts("p1", "foo") - a = sorted(submissions, key=lambda x: x["id"]) - b = sorted([x.to_dict() for x in notebook.submissions], key=lambda x: x["id"]) - assert a == b - - -def test_submission_dicts(assignment): - assignment.add_student('hacker123') - assignment.add_student('bitdiddle') - s1 = assignment.add_submission('foo', 'hacker123') - s2 = assignment.add_submission('foo', 'bitdiddle') - s1.flagged = True - s2.flagged = False - - g1 = assignment.find_grade("test1", "p1", "foo", "hacker123") - g2 = assignment.find_grade("test2", "p1", "foo", "hacker123") - g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle") - g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle") - - g1.manual_score = 0.5 - g2.manual_score = 2 - g3.manual_score = 1 - g4.manual_score = 1 - assignment.db.commit() - - a = sorted(assignment.submission_dicts("foo"), key=lambda x: x["id"]) - b = sorted([x.to_dict() for x in assignment.find_assignment("foo").submissions], key=lambda x: x["id"]) - assert a == b - - -def test_grant_extension(gradebook): - gradebook.add_assignment("ps1", duedate="2018-05-09 10:00:00") - gradebook.add_student("hacker123") - s1 = gradebook.add_submission("ps1", "hacker123") - assert s1.extension is None - assert s1.duedate == datetime(2018, 5, 9, 10, 0, 0) - - gradebook.grant_extension('ps1', 'hacker123', minutes=10) - assert s1.extension == timedelta(minutes=10) - assert s1.duedate == datetime(2018, 5, 9, 10, 10, 0) - - gradebook.grant_extension('ps1', 'hacker123', hours=1) - assert s1.extension == timedelta(hours=1) - assert s1.duedate == datetime(2018, 5, 9, 11, 0, 0) - - gradebook.grant_extension('ps1', 'hacker123', days=2) - assert s1.extension == timedelta(days=2) - assert s1.duedate == datetime(2018, 5, 11, 10, 0, 0) - - gradebook.grant_extension('ps1', 'hacker123', weeks=3) - assert s1.extension == timedelta(weeks=3) - assert s1.duedate == datetime(2018, 5, 30, 10, 0, 0) - - gradebook.grant_extension('ps1', 'hacker123') - assert s1.extension is None - assert s1.duedate == datetime(2018, 5, 9, 10, 0, 0) - - -# Test task cells - -def test_add_task_cell(gradebook): - gradebook.add_assignment('foo') - n = gradebook.add_notebook('p1', 'foo') - gc = gradebook.add_task_cell('test1', 'p1', 'foo', max_score=2, cell_type='markdown') - assert gc.name == 'test1' - assert gc.max_score == 2 - assert gc.cell_type == 'markdown' - assert n.task_cells == [gc] - assert gc.notebook == n - - -def test_add_task_cell_with_args(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - gc = gradebook.add_task_cell( - 'test1', 'p1', 'foo', - max_score=3, cell_type="code") - assert gc.name == 'test1' - assert gc.max_score == 3 - assert gc.cell_type == "code" - - -def test_create_invalid_task_cell(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - with pytest.raises(InvalidEntry): - gradebook.add_task_cell( - 'test1', 'p1', 'foo', - max_score=3, cell_type="something") - - -def test_add_duplicate_task_cell(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - gradebook.add_task_cell('test1', 'p1', 'foo', max_score=1, cell_type='code') - with pytest.raises(InvalidEntry): - gradebook.add_task_cell('test1', 'p1', 'foo', max_score=2, cell_type='markdown') - - -def test_find_task_cell(gradebook): - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - gc1 = gradebook.add_task_cell('test1', 'p1', 'foo', max_score=1, cell_type='code') - assert 
gradebook.find_task_cell('test1', 'p1', 'foo') == gc1 - - gc2 = gradebook.add_task_cell('test2', 'p1', 'foo', max_score=2, cell_type='code') - assert gradebook.find_task_cell('test1', 'p1', 'foo') == gc1 - assert gradebook.find_task_cell('test2', 'p1', 'foo') == gc2 - - -def test_find_nonexistant_task_cell(gradebook): - with pytest.raises(MissingEntry): - gradebook.find_task_cell('test1', 'p1', 'foo') - - gradebook.add_assignment('foo') - with pytest.raises(MissingEntry): - gradebook.find_task_cell('test1', 'p1', 'foo') - - gradebook.add_notebook('p1', 'foo') - with pytest.raises(MissingEntry): - gradebook.find_task_cell('test1', 'p1', 'foo') - - -def test_update_or_create_task_cell(gradebook): - # first test creating it - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - gc1 = gradebook.update_or_create_task_cell('test1', 'p1', 'foo', max_score=2, cell_type='markdown') - assert gc1.max_score == 2 - assert gc1.cell_type == 'markdown' - assert gradebook.find_task_cell('test1', 'p1', 'foo') == gc1 - - # now test finding/updating it - gc2 = gradebook.update_or_create_task_cell('test1', 'p1', 'foo', max_score=3) - assert gc1 == gc2 - assert gc1.max_score == 3 - assert gc1.cell_type == 'markdown' - - -def test_find_graded_cell(gradebook): - # first test creating it - gradebook.add_assignment('foo') - gradebook.add_assignment('foo2') - gradebook.add_notebook('p1', 'foo') - gradebook.add_notebook('p2', 'foo2') - gc1 = gradebook.update_or_create_task_cell('test1', 'p1', 'foo', max_score=2, cell_type='markdown') - assert gc1.max_score == 2 - assert gc1.cell_type == 'markdown' - assert gradebook.find_graded_cell('test1', 'p1', 'foo') == gc1 - gc2 = gradebook.update_or_create_grade_cell('test2', 'p2', 'foo2', max_score=2, cell_type='code') - assert gc2.max_score == 2 - assert gc2.cell_type == 'code' - assert gradebook.find_grade_cell('test2', 'p2', 'foo2') == gc2 - assert gradebook.find_graded_cell('test2', 'p2', 'foo2') == gc2 - - -def test_grade_cell_maxscore(gradebook): - # first test creating it - gradebook.add_assignment('foo') - gradebook.add_notebook('p1', 'foo') - gc1 = gradebook.update_or_create_task_cell('test1', 'p1', 'foo', max_score=1000, cell_type='markdown') - gc1a = gradebook.update_or_create_task_cell('test1a', 'p1', 'foo', max_score=3000, cell_type='markdown') - gc2 = gradebook.update_or_create_grade_cell('test2', 'p1', 'foo', max_score=5, cell_type='code') - gc3 = gradebook.update_or_create_grade_cell('test3', 'p1', 'foo', max_score=7, cell_type='code') - gc4 = gradebook.update_or_create_grade_cell('test4', 'p1', 'foo', max_score=13, cell_type='code') - gc5 = gradebook.update_or_create_grade_cell('test5', 'p1', 'foo', max_score=10, cell_type='code') - # assert gc2.max_score == 5 - n1 = gradebook.find_notebook('p1', 'foo') - assert n1.max_score_gradecell == 35 - assert n1.max_score_taskcell == 4000 - assert n1.max_score == 4035 - - -def test_grades_include_taskcells(assignmentWithSubmissionWithMarks: Gradebook) -> None: - s = assignmentWithSubmissionWithMarks.find_submission('foo', 'hacker123') - for n in s.notebooks: - grades = n.grades - assert len(grades) == 6 - - -# next 4 same as in normal tests, but with an assignment with tasks -def test_find_grade(assignmentWithSubmissionWithMarks): - s = assignmentWithSubmissionWithMarks.find_submission('foo', 'hacker123') - for n in s.notebooks: - grades = n.grades - for g1 in grades: - g2 = assignmentWithSubmissionWithMarks.find_grade(g1.name, n.name, 'foo', 'hacker123') - assert g1 == g2 - - with 
pytest.raises(MissingEntry): - assignmentWithSubmissionWithMarks.find_grade('asdf', 'p1', 'foo', 'hacker123') - - -def test_find_grade_by_id(assignmentWithSubmissionWithMarks): - s = assignmentWithSubmissionWithMarks.find_submission('foo', 'hacker123') - for n in s.notebooks: - grades = n.grades - - for g1 in grades: - g2 = assignmentWithSubmissionWithMarks.find_grade_by_id(g1.id) - assert g1 == g2 - - with pytest.raises(MissingEntry): - assignmentWithSubmissionWithMarks.find_grade_by_id('12345') - - -def test_find_comment(assignmentWithSubmissionWithMarks: Gradebook) -> None: - s = assignmentWithSubmissionWithMarks.find_submission('foo', 'hacker123') - for n in s.notebooks: - comments = n.comments - - for c1 in comments: - c2 = assignmentWithSubmissionWithMarks.find_comment(c1.name, n.name, 'foo', 'hacker123') - assert c1 == c2 - - with pytest.raises(MissingEntry): - assignmentWithSubmissionWithMarks.find_comment('asdf', n.name, 'foo', 'hacker123') - - -def test_find_comment_by_id(assignmentWithSubmissionWithMarks): - s = assignmentWithSubmissionWithMarks.find_submission('foo', 'hacker123') - for n in s.notebooks: - comments = n.comments - - for c1 in comments: - c2 = assignmentWithSubmissionWithMarks.find_comment_by_id(c1.id) - assert c1 == c2 - - with pytest.raises(MissingEntry): - assignmentWithSubmissionWithMarks.find_comment_by_id('12345') - - -def test_average_assignment_score_empty(assignment): - assert assignment.average_assignment_score('foo') == 0.0 - assert assignment.average_assignment_code_score('foo') == 0.0 - assert assignment.average_assignment_written_score('foo') == 0.0 - assert assignment.average_assignment_task_score('foo') == 0.0 - - -def test_average_assignment_no_score(assignmentWithSubmissionNoMarks): - assert assignmentWithSubmissionNoMarks.average_assignment_score('foo') == 0.0 - assert assignmentWithSubmissionNoMarks.average_assignment_code_score('foo') == 0.0 - assert assignmentWithSubmissionNoMarks.average_assignment_written_score('foo') == 0.0 - assert assignmentWithSubmissionNoMarks.average_assignment_task_score('foo') == 0.0 - - -def test_average_assignment_with_score(assignmentWithSubmissionWithMarks): - assert assignmentWithSubmissionWithMarks.average_assignment_score('foo') == sum(assignmentWithSubmissionWithMarks.usedgrades) / 2.0 - assert assignmentWithSubmissionWithMarks.average_assignment_code_score('foo') == sum(assignmentWithSubmissionWithMarks.usedgrades_code) / 2.0 - assert assignmentWithSubmissionWithMarks.average_assignment_written_score('foo') == sum(assignmentWithSubmissionWithMarks.usedgrades_written) / 2.0 - assert assignmentWithSubmissionWithMarks.average_assignment_task_score('foo') == sum(assignmentWithSubmissionWithMarks.usedgrades_task) / 2.0 - - -def test_average_notebook_score_empty(assignment): - assert assignment.average_notebook_score('p1', 'foo') == 0.0 - assert assignment.average_notebook_code_score('p1', 'foo') == 0.0 - assert assignment.average_notebook_written_score('p1', 'foo') == 0.0 - assert assignment.average_notebook_task_score('p1', 'foo') == 0.0 - - -def test_average_notebook_no_score(assignmentWithSubmissionNoMarks): - assert assignmentWithSubmissionNoMarks.average_notebook_score('p1', 'foo') == 0.0 - assert assignmentWithSubmissionNoMarks.average_notebook_code_score('p1', 'foo') == 0.0 - assert assignmentWithSubmissionNoMarks.average_notebook_written_score('p1', 'foo') == 0.0 - assert assignmentWithSubmissionNoMarks.average_notebook_task_score('p1', 'foo') == 0.0 - - -def 
test_average_notebook_with_score(assignmentWithSubmissionWithMarks: Gradebook) -> None: - assert assignmentWithSubmissionWithMarks.average_notebook_score('p1', 'foo') == sum(assignmentWithSubmissionWithMarks.usedgrades) / 2.0 - assert assignmentWithSubmissionWithMarks.average_notebook_code_score('p1', 'foo') == sum(assignmentWithSubmissionWithMarks.usedgrades_code) / 2.0 - assert assignmentWithSubmissionWithMarks.average_notebook_written_score('p1', 'foo') == sum(assignmentWithSubmissionWithMarks.usedgrades_written) / 2.0 - assert assignmentWithSubmissionWithMarks.average_notebook_task_score('p1', 'foo') == sum(assignmentWithSubmissionWithMarks.usedgrades_task) / 2.0 - - -def test_student_dicts(assignmentWithSubmissionWithMarks): - assign = assignmentWithSubmissionWithMarks - students = assign.student_dicts() - a = sorted(students, key=lambda x: x["id"]) - b = sorted([x.to_dict() for x in assign.students], key=lambda x: x["id"]) - assert a == b - - -def test_notebook_max_score(assignmentManyStudents): - assign = assignmentManyStudents - notebook = assign.find_notebook("p1", "foo") - assert notebook.max_score == 44 - - -def test_notebook_max_score_multiple_notebooks(FiveNotebooks): - assign = FiveNotebooks - notebook = assign.find_notebook("n1", "a1") - assert notebook.max_score == 555 - - -def test_submission_max_score(assignmentManyStudents): - assign = assignmentManyStudents - s = assign.find_submission('foo', 's1') - assert s.max_score == 88 - for n in s.notebooks: - assert n.max_score == 44 - - -def test_submission_max_score_multiple_notebooks(FiveNotebooks): - assign = FiveNotebooks - s = assign.find_submission('a1', 's1') - assert s.max_score == 5 * 555 - for n in s.notebooks: - assert n.max_score == 555 - - -def test_notebook_submission_dicts_multiple_students(FiveStudents): - assign = FiveStudents - notebook = assign.find_notebook("n1", "a1") - submissions = assign.notebook_submission_dicts("n1", "a1") - a = sorted(submissions, key=lambda x: x["id"]) - b = sorted([x.to_dict() for x in notebook.submissions], key=lambda x: x["id"]) - assert a == b - - -def test_notebook_submission_dicts_multiple_notebooks(FiveNotebooks): - assign = FiveNotebooks - notebook = assign.find_notebook("n1", "a1") - submissions = assign.notebook_submission_dicts("n1", "a1") - a = sorted(submissions, key=lambda x: x["id"]) - b = sorted([x.to_dict() for x in notebook.submissions], key=lambda x: x["id"]) - assert a == b - - -def test_notebook_submission_dicts_multiple_assignments(FiveAssignments): - assign = FiveAssignments - notebook = assign.find_notebook("n1", "a1") - submissions = assign.notebook_submission_dicts("n1", "a1") - a = sorted(submissions, key=lambda x: x["id"]) - b = sorted([x.to_dict() for x in notebook.submissions], key=lambda x: x["id"]) - assert a == b - - -def test_notebook_submission_dicts(assignmentWithSubmissionWithMarks): - assign = assignmentWithSubmissionWithMarks - notebook = assign.find_notebook("p1", "foo") - submissions = assign.notebook_submission_dicts("p1", "foo") - a = sorted(submissions, key=lambda x: x["id"]) - b = sorted([x.to_dict() for x in notebook.submissions], key=lambda x: x["id"]) - assert a == b - - -def test_submission_dicts_multiple_students(FiveStudents): - assign = FiveStudents - a = sorted(assign.submission_dicts("a1"), key=lambda x: x["id"]) - b = sorted([x.to_dict() for x in assign.find_assignment("a1").submissions], key=lambda x: x["id"]) - assert a == b - - -def test_submission_dicts_multiple_notebooks(FiveNotebooks): - assign = FiveNotebooks - a = 
sorted(assign.submission_dicts("a1"), key=lambda x: x["id"]) - b = sorted([x.to_dict() for x in assign.find_assignment("a1").submissions], key=lambda x: x["id"]) - assert a == b diff --git a/nbgrader/tests/api/test_models.py b/nbgrader/tests/api/test_models.py deleted file mode 100644 index a38e0f53c..000000000 --- a/nbgrader/tests/api/test_models.py +++ /dev/null @@ -1,1278 +0,0 @@ -import datetime -import pytest -import json - -from sqlalchemy import create_engine -from sqlalchemy.orm import sessionmaker, scoped_session -from sqlalchemy.sql import and_ - -from ... import api - - -@pytest.fixture -def db(request): - engine = create_engine("sqlite:///:memory:") - db = scoped_session(sessionmaker(autoflush=True, bind=engine)) - api.Base.query = db.query_property() - api.Base.metadata.create_all(bind=engine) - - def fin(): - db.remove() - engine.dispose() - request.addfinalizer(fin) - - return db - - -@pytest.fixture -def submissions(db): - now = datetime.datetime.utcnow() - a = api.Assignment(name='foo', duedate=now) - n = api.Notebook(name='blah', assignment=a) - gc1 = api.GradeCell(name='foo', max_score=10, notebook=n, cell_type="markdown") - gc2 = api.GradeCell(name='bar', max_score=5, notebook=n, cell_type="code") - sc = api.SolutionCell(name='foo', notebook=n) - api.SourceCell( - name='foo', cell_type='markdown', notebook=n, - source='waoiefjwoweifjw', checksum='12345', locked=True) - api.SourceCell( - name='bar', cell_type='code', notebook=n, - source='afejfwejfwe', checksum='567890', locked=False) - db.add(a) - db.commit() - - s = api.Student(id="12345", first_name='Jane', last_name='Doe', email='janedoe@nowhere', lms_user_id='230') - sa = api.SubmittedAssignment(assignment=a, student=s) - sn = api.SubmittedNotebook(assignment=sa, notebook=n) - g1a = api.Grade(cell=gc1, notebook=sn) - g2a = api.Grade(cell=gc2, notebook=sn) - ca = api.Comment(cell=sc, notebook=sn) - - db.add(s) - db.commit() - - s = api.Student(id="6789", first_name='John', last_name='Doe', email='johndoe@nowhere', lms_user_id='230') - sa = api.SubmittedAssignment(assignment=a, student=s) - sn = api.SubmittedNotebook(assignment=sa, notebook=n) - g1b = api.Grade(cell=gc1, notebook=sn) - g2b = api.Grade(cell=gc2, notebook=sn) - cb = api.Comment(cell=sc, notebook=sn) - - db.add(s) - db.commit() - - return db, (g1a, g2a, g1b, g2b), (ca, cb) - - -def test_create_assignment(db): - now = datetime.datetime.utcnow() - a = api.Assignment(name='foo', duedate=now) - db.add(a) - db.commit() - - assert a.id - assert a.name == 'foo' - assert a.duedate == now - assert a.notebooks == [] - assert a.submissions == [] - - assert a.max_score == 0 - assert a.max_code_score == 0 - assert a.max_written_score == 0 - assert a.num_submissions == 0 - - assert repr(a) == "Assignment" - - -def test_create_notebook(db): - now = datetime.datetime.utcnow() - a = api.Assignment(name='foo', duedate=now) - n = api.Notebook(name='blah', assignment=a) - db.add(a) - db.commit() - - assert n.id - assert n.name == 'blah' - assert n.assignment == a - assert n.grade_cells == [] - assert n.solution_cells == [] - assert n.source_cells == [] - assert n.submissions == [] - assert a.notebooks == [n] - - assert n.max_score == 0 - assert n.max_code_score == 0 - assert n.max_written_score == 0 - - assert repr(n) == "Notebook" - - -def test_create_grade_cell(db): - now = datetime.datetime.utcnow() - a = api.Assignment(name='foo', duedate=now) - n = api.Notebook(name='blah', assignment=a) - g = api.GradeCell(name='foo', max_score=10, notebook=n, cell_type="code") - 
db.add(a) - db.commit() - - assert g.id - assert g.name == 'foo' - assert g.max_score == 10 - assert g.cell_type == "code" - assert g.assignment == a - assert g.notebook == n - assert g.grades == [] - assert n.grade_cells == [g] - - assert n.max_score == 10 - assert n.max_code_score == 10 - assert n.max_written_score == 0 - - assert repr(g) == "GradeCell" - - -def test_create_solution_cell(db): - now = datetime.datetime.utcnow() - a = api.Assignment(name='foo', duedate=now) - n = api.Notebook(name='blah', assignment=a) - s = api.SolutionCell(name='foo', notebook=n) - db.add(a) - db.commit() - - assert s.id - assert s.name == 'foo' - assert s.assignment == a - assert s.notebook == n - assert s.comments == [] - assert n.solution_cells == [s] - - assert repr(s) == "SolutionCell" - - -def test_create_source_cell(db): - now = datetime.datetime.utcnow() - a = api.Assignment(name='foo', duedate=now) - n = api.Notebook(name='blah', assignment=a) - s = api.SourceCell( - name='foo', notebook=n, source="hello", - cell_type="code", checksum="12345") - db.add(a) - db.commit() - - assert s.id - assert s.name == 'foo' - assert not s.locked - assert s.cell_type == "code" - assert s.source == "hello" - assert s.checksum == "12345" - assert s.assignment == a - assert s.notebook == n - assert n.source_cells == [s] - - assert repr(s) == "SourceCell" - - -def test_create_student(db): - s = api.Student(id="12345", first_name='Jane', last_name='Doe', email='janedoe@nowhere') - db.add(s) - db.commit() - - assert s.id == "12345" - assert s.first_name == 'Jane' - assert s.last_name == 'Doe' - assert s.email == 'janedoe@nowhere' - assert s.submissions == [] - - assert s.score == 0 - assert s.max_score == 0 - - assert repr(s) == "Student<12345>" - - -def test_create_submitted_assignment(db): - a = api.Assignment(name='foo') - s = api.Student(id="12345", first_name='Jane', last_name='Doe', email='janedoe@nowhere') - sa = api.SubmittedAssignment(assignment=a, student=s) - db.add(sa) - db.commit() - - assert sa.id - assert sa.assignment == a - assert sa.student == s - assert sa.notebooks == [] - assert s.submissions == [sa] - assert a.submissions == [sa] - - assert sa.score == 0 - assert sa.max_score == 0 - assert sa.code_score == 0 - assert sa.max_code_score == 0 - assert sa.written_score == 0 - assert sa.max_written_score == 0 - assert not sa.needs_manual_grade - - assert sa.duedate is None - assert sa.timestamp is None - assert sa.extension is None - assert sa.total_seconds_late == 0 - - d = sa.to_dict() - assert d['id'] == sa.id - assert d['name'] == 'foo' - assert d['student'] == '12345' - assert d['timestamp'] == None - assert d['score'] == 0 - assert d['max_score'] == 0 - assert d['code_score'] == 0 - assert d['max_code_score'] == 0 - assert d['written_score'] == 0 - assert d['max_written_score'] == 0 - assert not d['needs_manual_grade'] - - assert repr(sa) == "SubmittedAssignment" - - -def test_submission_timestamp_ontime(db): - duedate = datetime.datetime.utcnow() - timestamp = duedate - datetime.timedelta(days=2) - - a = api.Assignment(name='foo', duedate=duedate) - s = api.Student(id="12345", first_name='Jane', last_name='Doe', email='janedoe@nowhere') - sa = api.SubmittedAssignment(assignment=a, student=s, timestamp=timestamp) - db.add(sa) - db.commit() - - assert sa.duedate == duedate - assert sa.timestamp == timestamp - assert sa.extension is None - assert sa.total_seconds_late == 0 - - -def test_submission_timestamp_late(db): - duedate = datetime.datetime.utcnow() - timestamp = duedate + 
datetime.timedelta(days=2) - - a = api.Assignment(name='foo', duedate=duedate) - s = api.Student(id="12345", first_name='Jane', last_name='Doe', email='janedoe@nowhere') - sa = api.SubmittedAssignment(assignment=a, student=s, timestamp=timestamp) - db.add(sa) - db.commit() - - assert sa.duedate == duedate - assert sa.timestamp == timestamp - assert sa.extension is None - assert sa.total_seconds_late == 172800 - - -def test_submission_timestamp_with_extension(db): - duedate = datetime.datetime.utcnow() - timestamp = duedate + datetime.timedelta(days=2) - extension = datetime.timedelta(days=3) - - a = api.Assignment(name='foo', duedate=duedate) - s = api.Student(id="12345", first_name='Jane', last_name='Doe', email='janedoe@nowhere') - sa = api.SubmittedAssignment(assignment=a, student=s, timestamp=timestamp, extension=extension) - db.add(sa) - db.commit() - - assert sa.duedate == (duedate + extension) - assert sa.timestamp == timestamp - assert sa.extension == extension - assert sa.total_seconds_late == 0 - - -def test_submission_timestamp_late_with_extension(db): - duedate = datetime.datetime.utcnow() - timestamp = duedate + datetime.timedelta(days=5) - extension = datetime.timedelta(days=3) - - a = api.Assignment(name='foo', duedate=duedate) - s = api.Student(id="12345", first_name='Jane', last_name='Doe', email='janedoe@nowhere') - sa = api.SubmittedAssignment(assignment=a, student=s, timestamp=timestamp, extension=extension) - db.add(sa) - db.commit() - - assert sa.duedate == (duedate + extension) - assert sa.timestamp == timestamp - assert sa.extension == extension - assert sa.total_seconds_late == 172800 - - -def test_create_submitted_notebook(db): - now = datetime.datetime.utcnow() - a = api.Assignment(name='foo', duedate=now) - n1 = api.Notebook(name='blah', assignment=a) - n2 = api.Notebook(name='blah2', assignment=a) - s = api.Student(id="12345", first_name='Jane', last_name='Doe', email='janedoe@nowhere') - sa = api.SubmittedAssignment(assignment=a, student=s) - sn1 = api.SubmittedNotebook(assignment=sa, notebook=n1, late_submission_penalty=5) - sn2 = api.SubmittedNotebook(assignment=sa, notebook=n2, late_submission_penalty=1) - db.add(sn1) - db.add(sn2) - db.commit() - - assert sn1.id - assert sn1.notebook == n1 - assert sn1.assignment == sa - assert sn1.grades == [] - assert sn1.comments == [] - assert sn1.student == s - assert sa.notebooks == [sn1, sn2] - assert n1.submissions == [sn1] - - assert sn1.score == 0 - assert sn1.max_score == 0 - assert sn1.code_score == 0 - assert sn1.max_code_score == 0 - assert sn1.written_score == 0 - assert sn1.max_written_score == 0 - assert sn1.late_submission_penalty == 5 - assert sn2.late_submission_penalty == 1 - assert sa.late_submission_penalty == 6 - assert not sn1.needs_manual_grade - - assert repr(sn1) == "SubmittedNotebook" - - -def test_create_code_grade(db): - now = datetime.datetime.utcnow() - a = api.Assignment(name='foo', duedate=now) - n = api.Notebook(name='blah', assignment=a) - gc = api.GradeCell(name='foo', max_score=10, notebook=n, cell_type="code") - s = api.Student(id="12345", first_name='Jane', last_name='Doe', email='janedoe@nowhere') - sa = api.SubmittedAssignment(assignment=a, student=s) - sn = api.SubmittedNotebook(assignment=sa, notebook=n) - g = api.Grade(cell=gc, notebook=sn, auto_score=5) - db.add(g) - db.commit() - - assert g.id - assert g.cell == gc - assert g.notebook == sn - assert g.auto_score == 5 - assert g.manual_score is None - assert g.assignment == sa - assert g.student == s - assert g.max_score == 10 
- - assert g.needs_manual_grade - assert sn.needs_manual_grade - assert sa.needs_manual_grade - - assert g.score == 5 - assert sn.score == 5 - assert sn.code_score == 5 - assert sn.written_score == 0 - assert sa.score == 5 - assert sa.code_score == 5 - assert sa.written_score == 0 - assert s.score == 5 - - g.manual_score = 7.5 - db.commit() - - assert g.needs_manual_grade - assert sn.needs_manual_grade - assert sa.needs_manual_grade - - assert g.score == 7.5 - assert sn.score == 7.5 - assert sn.code_score == 7.5 - assert sn.written_score == 0 - assert sa.score == 7.5 - assert sa.code_score == 7.5 - assert sa.written_score == 0 - assert s.score == 7.5 - - g.needs_manual_grade = False - db.commit() - - assert not g.needs_manual_grade - assert not sn.needs_manual_grade - assert not sa.needs_manual_grade - - assert repr(g) == "Grade" - - -def test_create_written_grade(db): - now = datetime.datetime.utcnow() - a = api.Assignment(name='foo', duedate=now) - n = api.Notebook(name='blah', assignment=a) - gc = api.GradeCell(name='foo', max_score=10, notebook=n, cell_type="markdown") - s = api.Student(id="12345", first_name='Jane', last_name='Doe', email='janedoe@nowhere') - sa = api.SubmittedAssignment(assignment=a, student=s) - sn = api.SubmittedNotebook(assignment=sa, notebook=n) - g = api.Grade(cell=gc, notebook=sn) - db.add(g) - db.commit() - - assert g.id - assert g.cell == gc - assert g.notebook == sn - assert g.auto_score is None - assert g.manual_score is None - assert g.assignment == sa - assert g.student == s - assert g.max_score == 10 - - assert g.needs_manual_grade - assert sn.needs_manual_grade - assert sa.needs_manual_grade - - assert g.score == 0 - assert sn.score == 0 - assert sn.code_score == 0 - assert sn.written_score == 0 - assert sa.score == 0 - assert sa.code_score == 0 - assert sa.written_score == 0 - assert s.score == 0 - - g.manual_score = 7.5 - db.commit() - - assert g.needs_manual_grade - assert sn.needs_manual_grade - assert sa.needs_manual_grade - - assert g.score == 7.5 - assert sn.score == 7.5 - assert sn.code_score == 0 - assert sn.written_score == 7.5 - assert sa.score == 7.5 - assert sa.code_score == 0 - assert sa.written_score == 7.5 - assert s.score == 7.5 - - g.needs_manual_grade = False - db.commit() - - assert not g.needs_manual_grade - assert not sn.needs_manual_grade - assert not sa.needs_manual_grade - - assert repr(g) == "Grade" - - -def test_create_comment(db): - now = datetime.datetime.utcnow() - a = api.Assignment(name='foo', duedate=now) - n = api.Notebook(name='blah', assignment=a) - sc = api.SolutionCell(name='foo', notebook=n) - s = api.Student(id="12345", first_name='Jane', last_name='Doe', email='janedoe@nowhere') - sa = api.SubmittedAssignment(assignment=a, student=s) - sn = api.SubmittedNotebook(assignment=sa, notebook=n) - c = api.Comment(cell=sc, notebook=sn, auto_comment="something") - db.add(c) - db.commit() - - assert c.id - assert c.cell == sc - assert c.notebook == sn - assert c.comment == "something" - assert c.assignment == sa - assert c.student == s - - assert repr(c) == "Comment" - - -def test_query_needs_manual_grade_ungraded(submissions): - db = submissions[0] - - # do all the cells need grading? - a = db.query(api.Grade)\ - .filter(api.Grade.needs_manual_grade)\ - .order_by(api.Grade.id)\ - .all() - b = db.query(api.Grade)\ - .order_by(api.Grade.id)\ - .all() - assert a == b - - # do all the submitted notebooks need grading? 
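# (needs_manual_grade propagates upward: a SubmittedNotebook needs manual
# grading when any of its grades does, and a SubmittedAssignment when any of
# its notebooks does, so on a fully ungraded fixture the notebook- and
# assignment-level queries below should match everything, just as the
# cell-level query above did.)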
- a = db.query(api.SubmittedNotebook)\ - .filter(api.SubmittedNotebook.needs_manual_grade)\ - .order_by(api.SubmittedNotebook.id)\ - .all() - b = db.query(api.SubmittedNotebook)\ - .order_by(api.SubmittedNotebook.id)\ - .all() - assert a == b - - # do all the notebooks need grading? - a = db.query(api.Notebook)\ - .filter(api.Notebook.needs_manual_grade)\ - .order_by(api.Notebook.id)\ - .all() - b = db.query(api.Notebook)\ - .order_by(api.Notebook.id)\ - .all() - assert a == b - - # do all the assignments need grading? - a = db.query(api.SubmittedAssignment)\ - .join(api.SubmittedNotebook).join(api.Grade)\ - .filter(api.SubmittedNotebook.needs_manual_grade)\ - .order_by(api.SubmittedAssignment.id)\ - .all() - b = db.query(api.SubmittedAssignment)\ - .order_by(api.SubmittedAssignment.id)\ - .all() - assert a == b - - -def test_query_needs_manual_grade_autograded(submissions): - db, grades, _ = submissions - - for grade in grades: - grade.auto_score = grade.max_score - db.commit() - - # do all the cells need grading? - a = db.query(api.Grade)\ - .filter(api.Grade.needs_manual_grade)\ - .order_by(api.Grade.id)\ - .all() - b = db.query(api.Grade)\ - .order_by(api.Grade.id)\ - .all() - assert a == b - - # do all the submitted notebooks need grading? - a = db.query(api.SubmittedNotebook)\ - .filter(api.SubmittedNotebook.needs_manual_grade)\ - .order_by(api.SubmittedNotebook.id)\ - .all() - b = db.query(api.SubmittedNotebook)\ - .order_by(api.SubmittedNotebook.id)\ - .all() - assert a == b - - # do all the notebooks need grading? - a = db.query(api.Notebook)\ - .filter(api.Notebook.needs_manual_grade)\ - .order_by(api.Notebook.id)\ - .all() - b = db.query(api.Notebook)\ - .order_by(api.Notebook.id)\ - .all() - assert a == b - - # do all the assignments need grading? - a = db.query(api.SubmittedAssignment)\ - .join(api.SubmittedNotebook).join(api.Grade)\ - .filter(api.SubmittedNotebook.needs_manual_grade)\ - .order_by(api.SubmittedAssignment.id)\ - .all() - b = db.query(api.SubmittedAssignment)\ - .order_by(api.SubmittedAssignment.id)\ - .all() - assert a == b - - for grade in grades: - grade.needs_manual_grade = False - db.commit() - - # do none of the cells need grading? - assert [] == db.query(api.Grade)\ - .filter(api.Grade.needs_manual_grade)\ - .all() - - # do none of the submitted notebooks need grading? - assert [] == db.query(api.SubmittedNotebook)\ - .filter(api.SubmittedNotebook.needs_manual_grade)\ - .all() - - # do none of the notebooks need grading? - assert [] == db.query(api.Notebook)\ - .filter(api.Notebook.needs_manual_grade)\ - .all() - - # do none of the assignments need grading? - assert [] == db.query(api.SubmittedAssignment)\ - .join(api.SubmittedNotebook).join(api.Grade)\ - .filter(api.SubmittedNotebook.needs_manual_grade)\ - .all() - - -def test_query_needs_manual_grade_manualgraded(submissions): - db, grades, _ = submissions - - for grade in grades: - grade.auto_score = None - grade.manual_score = grade.max_score / 2.0 - db.commit() - - # do all the cells need grading? - a = db.query(api.Grade)\ - .filter(api.Grade.needs_manual_grade)\ - .order_by(api.Grade.id)\ - .all() - b = db.query(api.Grade)\ - .order_by(api.Grade.id)\ - .all() - assert a == b - - # do all the submitted notebooks need grading? 
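# (Assigning a manual_score alone does not clear the flag: every level still
# reports needing a manual grade until needs_manual_grade is explicitly set
# to False on each Grade, which the second half of this test verifies.)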
- a = db.query(api.SubmittedNotebook)\ - .filter(api.SubmittedNotebook.needs_manual_grade)\ - .order_by(api.SubmittedNotebook.id)\ - .all() - b = db.query(api.SubmittedNotebook)\ - .order_by(api.SubmittedNotebook.id)\ - .all() - assert a == b - - # do all the notebooks need grading? - a = db.query(api.Notebook)\ - .filter(api.Notebook.needs_manual_grade)\ - .order_by(api.Notebook.id)\ - .all() - b = db.query(api.Notebook)\ - .order_by(api.Notebook.id)\ - .all() - assert a == b - - # do all the assignments need grading? - a = db.query(api.SubmittedAssignment)\ - .join(api.SubmittedNotebook).join(api.Grade)\ - .filter(api.SubmittedNotebook.needs_manual_grade)\ - .order_by(api.SubmittedAssignment.id)\ - .all() - b = db.query(api.SubmittedAssignment)\ - .order_by(api.SubmittedAssignment.id)\ - .all() - assert a == b - - for grade in grades: - grade.needs_manual_grade = False - db.commit() - - # do none of the cells need grading? - assert [] == db.query(api.Grade)\ - .filter(api.Grade.needs_manual_grade)\ - .all() - - # do none of the submitted notebooks need grading? - assert [] == db.query(api.SubmittedNotebook)\ - .filter(api.SubmittedNotebook.needs_manual_grade)\ - .all() - - # do none of the notebooks need grading? - assert [] == db.query(api.Notebook)\ - .filter(api.Notebook.needs_manual_grade)\ - .all() - - # do none of the assignments need grading? - assert [] == db.query(api.SubmittedAssignment)\ - .join(api.SubmittedNotebook).join(api.Grade)\ - .filter(api.SubmittedNotebook.needs_manual_grade)\ - .all() - - -def test_query_max_score(submissions): - db = submissions[0] - - assert [5, 10] == sorted([x[1] for x in db.query( - api.GradeCell.id, api.GradeCell.max_score).group_by(api.GradeCell.id).all()]) - assert [5, 5, 10, 10] == sorted([x[1] for x in db.query( - api.Grade.id, api.Grade.max_score).group_by(api.Grade.id).all()]) - assert [15] == sorted([x[1] for x in db.query( - api.Notebook.id, api.Notebook.max_score).group_by(api.Notebook.id).all()]) - assert [15, 15] == sorted([x[1] for x in db.query( - api.SubmittedNotebook.id, api.SubmittedNotebook.max_score).group_by(api.SubmittedNotebook.id).all()]) - assert [15] == sorted([x[1] for x in db.query( - api.Assignment.id, api.Assignment.max_score).group_by(api.Assignment.id).all()]) - assert [15, 15] == sorted([x[1] for x in db.query( - api.SubmittedAssignment.id, api.SubmittedAssignment.max_score).group_by(api.SubmittedAssignment.id).all()]) - assert [15, 15] == sorted([x[1] for x in db.query( - api.Student.id, api.Student.max_score).group_by(api.Student.id).all()]) - - -def test_query_score_ungraded(submissions): - db = submissions[0] - - assert [x[0] for x in db.query(api.Grade.score).all()] == [0.0, 0.0, 0.0, 0.0] - assert [x[1] for x in db.query(api.SubmittedNotebook.id, api.SubmittedNotebook.score).all()] == [0.0, 0.0] - assert [x[1] for x in db.query(api.SubmittedAssignment.id, api.SubmittedAssignment.score).all()] == [0.0, 0.0] - assert [x[1] for x in db.query(api.Student.id, api.Student.score).all()] == [0.0, 0.0] - - -def test_query_comment_unchanged(submissions): - db = submissions[0] - - assert [x[0] for x in db.query(api.Comment.comment).all()] == [None, None] - - -def test_query_score_autograded(submissions): - db, grades, _ = submissions - - grades[0].auto_score = 10 - grades[1].auto_score = 0 - grades[2].auto_score = 5 - grades[3].auto_score = 2.5 - db.commit() - - assert sorted(x[0] for x in db.query(api.Grade.score).all()) == [0, 2.5, 5, 10] - assert sorted(x[1] for x in db.query(api.SubmittedNotebook.id,
api.SubmittedNotebook.score).all()) == [7.5, 10] - assert sorted(x[1] for x in db.query(api.SubmittedAssignment.id, api.SubmittedAssignment.score).all()) == [7.5, 10] - assert sorted(x[1] for x in db.query(api.Student.id, api.Student.score).all()) == [7.5, 10] - - -def test_query_auto_comment(submissions): - db, _, comments = submissions - - comments[0].auto_comment = "foo" - comments[1].auto_comment = "bar" - db.commit() - - assert sorted(x[0] for x in db.query(api.Comment.comment).all()) == ["bar", "foo"] - - -def test_query_score_manualgraded(submissions): - db, grades, _ = submissions - - grades[0].auto_score = 10 - grades[1].auto_score = 0 - grades[2].auto_score = 5 - grades[3].auto_score = 2.5 - grades[0].manual_score = 4 - grades[1].manual_score = 1.5 - grades[2].manual_score = 9 - grades[3].manual_score = 3 - db.commit() - - assert sorted(x[0] for x in db.query(api.Grade.score).all()) == [1.5, 3, 4, 9] - assert sorted(x[1] for x in db.query(api.SubmittedNotebook.id, api.SubmittedNotebook.score).all()) == [5.5, 12] - assert sorted(x[1] for x in db.query(api.SubmittedAssignment.id, api.SubmittedAssignment.score).all()) == [5.5, 12] - assert sorted(x[1] for x in db.query(api.Student.id, api.Student.score).all()) == [5.5, 12] - - -def test_query_manual_comment(submissions): - db, _, comments = submissions - - comments[0].auto_comment = "foo" - comments[1].auto_comment = "bar" - comments[0].manual_comment = "baz" - comments[1].manual_comment = "quux" - db.commit() - - assert sorted(x[0] for x in db.query(api.Comment.comment).all()) == ["baz", "quux"] - - -def test_query_max_written_score(submissions): - db = submissions[0] - - assert [10] == sorted([x[1] for x in db.query(api.Notebook.id, api.Notebook.max_written_score).all()]) - assert [10, 10] == sorted([x[1] for x in db.query(api.SubmittedNotebook.id, api.SubmittedNotebook.max_written_score).all()]) - assert [10] == sorted([x[1] for x in db.query(api.Assignment.id, api.Assignment.max_written_score).all()]) - assert [10, 10] == sorted([x[1] for x in db.query(api.SubmittedAssignment.id, api.SubmittedAssignment.max_written_score).all()]) - - -def test_query_written_score_ungraded(submissions): - db = submissions[0] - - assert [0.0, 0.0] == [x[1] for x in db.query(api.SubmittedNotebook.id, api.SubmittedNotebook.written_score).all()] - assert [0.0, 0.0] == [x[1] for x in db.query(api.SubmittedAssignment.id, api.SubmittedAssignment.written_score).all()] - - -def test_query_written_score_autograded(submissions): - db, grades, _ = submissions - - grades[0].auto_score = 10 - grades[1].auto_score = 0 - grades[2].auto_score = 5 - grades[3].auto_score = 2.5 - db.commit() - - assert [5, 10] == sorted(x[1] for x in db.query(api.SubmittedNotebook.id, api.SubmittedNotebook.written_score).all()) - assert [5, 10] == sorted(x[1] for x in db.query(api.SubmittedAssignment.id, api.SubmittedAssignment.written_score).all()) - - -def test_query_written_score_manualgraded(submissions): - db, grades, _ = submissions - - grades[0].auto_score = 10 - grades[1].auto_score = 0 - grades[2].auto_score = 5 - grades[3].auto_score = 2.5 - grades[0].manual_score = 4 - grades[1].manual_score = 1.5 - grades[2].manual_score = 9 - grades[3].manual_score = 3 - db.commit() - - assert [4, 9] == sorted(x[1] for x in db.query(api.SubmittedNotebook.id, api.SubmittedNotebook.written_score).all()) - assert [4, 9] == sorted(x[1] for x in db.query(api.SubmittedAssignment.id, api.SubmittedAssignment.written_score).all()) - - -def test_query_max_code_score(submissions): - db = 
submissions[0] - - assert [5] == sorted([x[1] for x in db.query(api.Notebook.id, api.Notebook.max_code_score).all()]) - assert [5, 5] == sorted([x[1] for x in db.query(api.SubmittedNotebook.id, api.SubmittedNotebook.max_code_score).all()]) - assert [5] == sorted([x[1] for x in db.query(api.Assignment.id, api.Assignment.max_code_score).all()]) - assert [5, 5] == sorted([x[1] for x in db.query(api.SubmittedAssignment.id, api.SubmittedAssignment.max_code_score).all()]) - - -def test_query_code_score_ungraded(submissions): - db = submissions[0] - - assert [0.0, 0.0] == [x[1] for x in db.query(api.SubmittedNotebook.id, api.SubmittedNotebook.code_score).all()] - assert [0.0, 0.0] == [x[1] for x in db.query(api.SubmittedAssignment.id, api.SubmittedAssignment.code_score).all()] - - -def test_query_code_score_autograded(submissions): - db, grades, _ = submissions - - grades[0].auto_score = 10 - grades[1].auto_score = 0 - grades[2].auto_score = 5 - grades[3].auto_score = 2.5 - db.commit() - - assert [0, 2.5] == sorted(x[1] for x in db.query(api.SubmittedNotebook.id, api.SubmittedNotebook.code_score).all()) - assert [0, 2.5] == sorted(x[1] for x in db.query(api.SubmittedAssignment.id, api.SubmittedAssignment.code_score).all()) - - -def test_query_code_score_manualgraded(submissions): - db, grades, _ = submissions - - grades[0].auto_score = 10 - grades[1].auto_score = 0 - grades[2].auto_score = 5 - grades[3].auto_score = 2.5 - grades[0].manual_score = 4 - grades[1].manual_score = 1.5 - grades[2].manual_score = 9 - grades[3].manual_score = 3 - db.commit() - - assert [1.5, 3] == sorted(x[1] for x in db.query(api.SubmittedNotebook.id, api.SubmittedNotebook.code_score).all()) - assert [1.5, 3] == sorted(x[1] for x in db.query(api.SubmittedAssignment.id, api.SubmittedAssignment.code_score).all()) - - -def test_query_auto_score_extra_credit(submissions): - db, grades, _ = submissions - - grades[0].auto_score = 10 - grades[1].auto_score = 0 - grades[2].auto_score = 5 - grades[3].auto_score = 2.5 - - grades[0].extra_credit = 0.5 - grades[1].extra_credit = 0 - grades[2].extra_credit = 2.3 - grades[3].extra_credit = 1.1 - db.commit() - - assert sorted(x[0] for x in db.query(api.Grade.score).all()) == [0, 3.6, 7.3, 10.5] - assert sorted(x[1] for x in db.query(api.SubmittedNotebook.id, api.SubmittedNotebook.score).all()) == [10.5, 10.9] - assert sorted(x[1] for x in db.query(api.SubmittedAssignment.id, api.SubmittedAssignment.score).all()) == [10.5, 10.9] - assert sorted(x[1] for x in db.query(api.Student.id, api.Student.score).all()) == [10.5, 10.9] - - -def test_query_manual_score_extra_credit(submissions): - db, grades, _ = submissions - - grades[0].auto_score = 10 - grades[1].auto_score = 0 - grades[2].auto_score = 5 - grades[3].auto_score = 2.5 - - grades[0].manual_score = 4 - grades[1].manual_score = 1.5 - grades[2].manual_score = 9 - grades[3].manual_score = 3 - - grades[0].extra_credit = 0.5 - grades[1].extra_credit = 0 - grades[2].extra_credit = 2.3 - grades[3].extra_credit = 1.1 - db.commit() - - assert sorted(x[0] for x in db.query(api.Grade.score).all()) == [1.5, 4.1, 4.5, 11.3] - assert sorted(x[1] for x in db.query(api.SubmittedNotebook.id, api.SubmittedNotebook.score).all()) == [6, 15.4] - assert sorted(x[1] for x in db.query(api.SubmittedAssignment.id, api.SubmittedAssignment.score).all()) == [6, 15.4] - assert sorted(x[1] for x in db.query(api.Student.id, api.Student.score).all()) == [6, 15.4] - - -def test_query_num_submissions(submissions): - db = submissions[0] - - assert [2] == [x[0] for x in 
db.query(api.Assignment.num_submissions).all()] - assert [2] == [x[0] for x in db.query(api.Notebook.num_submissions).all()] - - -def test_student_max_score(db): - now = datetime.datetime.utcnow() - a = api.Assignment(name='foo', duedate=now) - n = api.Notebook(name='blah', assignment=a) - api.GradeCell(name='foo', max_score=10, notebook=n, cell_type="markdown") - api.GradeCell(name='bar', max_score=5, notebook=n, cell_type="code") - db.add(a) - db.commit() - - s = api.Student(id="12345", first_name='Jane', last_name='Doe', email='janedoe@nowhere') - db.add(s) - db.commit() - - assert s.max_score == 15 - - -def test_query_grade_cell_types(submissions): - db = submissions[0] - - a = db.query(api.Grade)\ - .filter(api.Grade.cell_type == "code")\ - .order_by(api.Grade.id)\ - .all() - b = db.query(api.Grade)\ - .join(api.GradeCell)\ - .filter(api.GradeCell.cell_type == "code")\ - .order_by(api.Grade.id)\ - .all() - assert a == b - - a = db.query(api.Grade)\ - .filter(api.Grade.cell_type == "markdown")\ - .order_by(api.Grade.id)\ - .all() - b = db.query(api.Grade)\ - .join(api.GradeCell)\ - .filter(api.GradeCell.cell_type == "markdown")\ - .order_by(api.Grade.id)\ - .all() - assert a == b - - -def test_query_failed_tests_failed(submissions): - db, grades, _ = submissions - - for grade in grades: - if grade.cell.cell_type == "code": - grade.auto_score = 0 - db.commit() - - # have all the cells failed? - a = db.query(api.Grade)\ - .filter(api.Grade.failed_tests)\ - .order_by(api.Grade.id)\ - .all() - b = db.query(api.Grade)\ - .filter(api.Grade.cell_type == "code")\ - .order_by(api.Grade.id)\ - .all() - assert a == b - - # have all the notebooks failed? - a = db.query(api.SubmittedNotebook)\ - .filter(api.SubmittedNotebook.failed_tests)\ - .order_by(api.SubmittedNotebook.id)\ - .all() - b = db.query(api.SubmittedNotebook)\ - .order_by(api.SubmittedNotebook.id)\ - .all() - assert a == b - - -def test_query_failed_tests_ok(submissions): - db, all_grades, _ = submissions - - for grade in all_grades: - if grade.cell.cell_type == "code": - grade.auto_score = grade.max_score - db.commit() - - # are all the grades ok? - assert [] == db.query(api.Grade)\ - .filter(api.Grade.failed_tests)\ - .all() - - # are all the notebooks ok?
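# (Roughly, a code-cell Grade counts as failed when its autograded score is
# below the cell's max score, and a SubmittedNotebook fails when any of its
# code-cell grades do; with every code cell at full credit, the notebook
# query below should come back empty as well.)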
- assert [] == db.query(api.SubmittedNotebook)\ - .filter(api.SubmittedNotebook.failed_tests)\ - .all() - - -def test_assignment_to_dict(submissions): - db = submissions[0] - - a = db.query(api.Assignment).one() - ad = a.to_dict() - - assert set(ad.keys()) == { - 'id', 'name', 'duedate', 'num_submissions', 'max_score', - 'max_code_score', 'max_written_score', 'max_task_score'} - - assert ad['id'] == a.id - assert ad['name'] == "foo" - assert ad['duedate'] == a.duedate.isoformat() - assert ad['num_submissions'] == 2 - assert ad['max_score'] == 15 - assert ad['max_code_score'] == 5 - assert ad['max_written_score'] == 10 - - # make sure it can be JSONified - json.dumps(ad) - - -def test_notebook_to_dict(submissions): - db = submissions[0] - - a = db.query(api.Assignment).one() - n, = a.notebooks - nd = n.to_dict() - - assert set(nd.keys()) == { - 'id', 'name', 'num_submissions', 'max_score', 'max_code_score', - 'max_written_score', 'needs_manual_grade', 'max_task_score'} - - assert nd['id'] == n.id - assert nd['name'] == 'blah' - assert nd['num_submissions'] == 2 - assert nd['max_score'] == 15 - assert nd['max_code_score'] == 5 - assert nd['max_written_score'] == 10 - assert nd['needs_manual_grade'] - - # make sure it can be JSONified - json.dumps(nd) - - -def test_gradecell_to_dict(submissions): - db = submissions[0] - - gc1 = db.query(api.GradeCell).filter(api.GradeCell.name == 'foo').one() - gc2 = db.query(api.GradeCell).filter(api.GradeCell.name == 'bar').one() - - gc1d = gc1.to_dict() - gc2d = gc2.to_dict() - - assert set(gc1d.keys()) == set(gc2d.keys()) - assert set(gc1d.keys()) == { - 'id', 'name', 'max_score', 'cell_type', 'notebook', 'assignment'} - - assert gc1d['id'] == gc1.id - assert gc1d['name'] == 'foo' - assert gc1d['max_score'] == 10 - assert gc1d['cell_type'] == 'markdown' - assert gc1d['notebook'] == 'blah' - assert gc1d['assignment'] == 'foo' - - assert gc2d['id'] == gc2.id - assert gc2d['name'] == 'bar' - assert gc2d['max_score'] == 5 - assert gc2d['cell_type'] == 'code' - assert gc2d['notebook'] == 'blah' - assert gc2d['assignment'] == 'foo' - - # make sure it can be JSONified - json.dumps(gc1d) - json.dumps(gc2d) - - -def test_solutioncell_to_dict(submissions): - db = submissions[0] - - sc = db.query(api.SolutionCell).one() - scd = sc.to_dict() - - assert set(scd.keys()) == {'id', 'name', 'notebook', 'assignment'} - - assert scd['id'] == sc.id - assert scd['name'] == 'foo' - assert scd['notebook'] == 'blah' - assert scd['assignment'] == 'foo' - - # make sure it can be JSONified - json.dumps(scd) - - -def test_sourcecell_to_dict(submissions): - db = submissions[0] - - sc1 = db.query(api.SourceCell).filter(api.SourceCell.name == 'foo').one() - sc2 = db.query(api.SourceCell).filter(api.SourceCell.name == 'bar').one() - - sc1d = sc1.to_dict() - sc2d = sc2.to_dict() - - assert set(sc1d.keys()) == set(sc2d.keys()) - assert set(sc1d.keys()) == { - 'id', 'name', 'cell_type', 'source', 'checksum', 'locked', - 'notebook', 'assignment'} - - assert sc1d['id'] == sc1.id - assert sc1d['name'] == 'foo' - assert sc1d['cell_type'] == 'markdown' - assert sc1d['source'] == 'waoiefjwoweifjw' - assert sc1d['checksum'] == '12345' - assert sc1d['notebook'] == 'blah' - assert sc1d['assignment'] == 'foo' - assert sc1d['locked'] - - assert sc2d['id'] == sc2.id - assert sc2d['name'] == 'bar' - assert sc2d['cell_type'] == 'code' - assert sc2d['source'] == 'afejfwejfwe' - assert sc2d['checksum'] == '567890' - assert sc2d['notebook'] == 'blah' - assert sc2d['assignment'] == 'foo' - assert not 
sc2d['locked'] - - # make sure it can be JSONified - json.dumps(sc1d) - json.dumps(sc2d) - - -def test_student_to_dict(submissions): - db = submissions[0] - - s1 = db.query(api.Student).filter(api.Student.id == '12345').one() - s2 = db.query(api.Student).filter(api.Student.id == '6789').one() - - s1d = s1.to_dict() - s2d = s2.to_dict() - - assert set(s1d.keys()) == set(s2d.keys()) - assert set(s1d.keys()) == { - 'id', 'first_name', 'last_name', 'email', 'score', 'max_score', 'lms_user_id'} - - assert s1d['id'] == '12345' - assert s1d['first_name'] == 'Jane' - assert s1d['last_name'] == 'Doe' - assert s1d['email'] == 'janedoe@nowhere' - assert s1d['score'] == 0 - assert s1d['max_score'] == 15 - assert s1d['lms_user_id'] == '230' - - assert s2d['id'] == '6789' - assert s2d['first_name'] == 'John' - assert s2d['last_name'] == 'Doe' - assert s2d['email'] == 'johndoe@nowhere' - assert s2d['score'] == 0 - assert s2d['max_score'] == 15 - assert s2d['lms_user_id'] == '230' - - # make sure it can be JSONified - json.dumps(s1d) - json.dumps(s2d) - - -def test_submittedassignment_to_dict(submissions): - db = submissions[0] - - sa = db.query(api.SubmittedAssignment)\ - .join(api.Student)\ - .filter(api.Student.id == '12345')\ - .one() - - sad = sa.to_dict() - - assert set(sad.keys()) == { - 'id', 'name', 'student', 'timestamp', 'score', 'max_score', 'code_score', - 'max_code_score', 'written_score', 'max_written_score', - 'task_score', 'max_task_score', - 'needs_manual_grade', 'last_name', 'first_name'} - - assert sad['id'] == sa.id - assert sad['name'] == 'foo' - assert sad['student'] == '12345' - assert sad['last_name'] == 'Doe' - assert sad['first_name'] == 'Jane' - assert sad['timestamp'] is None - assert sad['score'] == 0 - assert sad['max_score'] == 15 - assert sad['code_score'] == 0 - assert sad['max_code_score'] == 5 - assert sad['written_score'] == 0 - assert sad['max_written_score'] == 10 - assert sad['needs_manual_grade'] - - # make sure it can be JSONified - json.dumps(sad) - - -def test_submittednotebook_to_dict(submissions): - db = submissions[0] - - sn = db.query(api.SubmittedNotebook)\ - .join(api.Notebook).join(api.SubmittedAssignment).join(api.Student)\ - .filter(and_( - api.Student.id == '12345', - api.Notebook.name == 'blah'))\ - .one() - - snd = sn.to_dict() - - assert set(snd.keys()) == { - 'id', 'name', 'student', 'last_name', 'first_name', - 'score', 'max_score', 'code_score', - 'max_code_score', 'written_score', 'max_written_score', - 'task_score', 'max_task_score', - 'needs_manual_grade', 'failed_tests', 'flagged'} - - assert snd['id'] == sn.id - assert snd['name'] == 'blah' - assert snd['student'] == '12345' - assert snd['last_name'] == 'Doe' - assert snd['first_name'] == 'Jane' - assert snd['score'] == 0 - assert snd['max_score'] == 15 - assert snd['code_score'] == 0 - assert snd['max_code_score'] == 5 - assert snd['written_score'] == 0 - assert snd['max_written_score'] == 10 - assert snd['needs_manual_grade'] - assert not snd['failed_tests'] - assert not snd['flagged'] - - # make sure it can be JSONified - json.dumps(snd) - - -def test_grade_to_dict(submissions): - _, grades, _ = submissions - - for g in grades: - gd = g.to_dict() - assert set(gd.keys()) == { - 'id', 'name', 'notebook', 'assignment', 'student', 'auto_score', - 'manual_score', 'max_score', 'needs_manual_grade', 'failed_tests', - 'cell_type', 'extra_credit'} - - assert gd['id'] == g.id - assert gd['name'] == g.name - assert gd['notebook'] == 'blah' - assert gd['assignment'] == 'foo' - assert gd['student'] == 
g.student.id - assert gd['auto_score'] is None - assert gd['manual_score'] is None - assert gd['extra_credit'] is None - assert gd['needs_manual_grade'] - assert not gd['failed_tests'] - assert gd['cell_type'] == g.cell_type - - # make sure it can be JSONified - json.dumps(gd) - - -def test_comment_to_dict(submissions): - _, _, comments = submissions - - for c in comments: - cd = c.to_dict() - assert set(cd.keys()) == { - 'id', 'name', 'notebook', 'assignment', 'student', 'auto_comment', - 'manual_comment'} - - assert cd['id'] == c.id - assert cd['name'] == c.name - assert cd['notebook'] == 'blah' - assert cd['assignment'] == 'foo' - assert cd['student'] == c.student.id - assert cd['auto_comment'] is None - assert cd['manual_comment'] is None - - # make sure it can be JSONified - json.dumps(cd) diff --git a/nbgrader/tests/apps/__init__.py b/nbgrader/tests/apps/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/nbgrader/tests/apps/base.py b/nbgrader/tests/apps/base.py deleted file mode 100644 index 9e5c6b8a2..000000000 --- a/nbgrader/tests/apps/base.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- - -import io -import os -import shutil -import pytest - -from nbformat import write as write_nb -from nbformat.v4 import new_notebook - -from ...utils import remove - - -@pytest.mark.usefixtures("temp_cwd") -class BaseTestApp(object): - - def _empty_notebook(self, path, kernel=None): - nb = new_notebook() - if kernel is not None: - nb.metadata.kernelspec = { - "display_name": "kernel", - "language": kernel, - "name": kernel - } - - full_dest = os.path.abspath(path) - if not os.path.exists(os.path.dirname(full_dest)): - os.makedirs(os.path.dirname(full_dest)) - if os.path.exists(full_dest): - remove(full_dest) - with io.open(full_dest, mode='w', encoding='utf-8') as f: - write_nb(nb, f, 4) - - def _copy_file(self, src: str, dest: str) -> None: - full_src = os.path.join(os.path.dirname(__file__), src) - full_dest = os.path.abspath(dest) - if not os.path.exists(os.path.dirname(full_dest)): - os.makedirs(os.path.dirname(full_dest)) - if os.path.exists(full_dest): - remove(full_dest) - shutil.copy(full_src, full_dest) - - def _move_file(self, src, dest): - full_src = os.path.abspath(src) - full_dest = os.path.abspath(dest) - if not os.path.exists(os.path.dirname(full_dest)): - os.makedirs(os.path.dirname(full_dest)) - if os.path.exists(full_dest): - remove(full_dest) - shutil.move(full_src, full_dest) - - def _make_file(self, path: str, contents: str = "") -> None: - full_dest = os.path.abspath(path) - if not os.path.exists(os.path.dirname(full_dest)): - os.makedirs(os.path.dirname(full_dest)) - if os.path.exists(full_dest): - remove(full_dest) - with open(path, "w") as fh: - fh.write(contents) - - def _get_permissions(self, filename): - st_mode = os.stat(filename).st_mode - # If the setgid bit is set, return four octal digits. For testing CourseDirectory.groupshared.
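# (Worked example: a group-shared file has st_mode 0o102664, so
# oct(st_mode) == '0o102664' and the setgid branch yields '2664'; a plain
# mode-644 file has st_mode 0o100644 and yields '644'.)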
- if st_mode & 0o2000: - return oct(st_mode)[-4:] - return oct(st_mode)[-3:] - - def _file_contents(self, path): - with open(path, "r") as fh: - contents = fh.read() - return contents diff --git a/nbgrader/tests/apps/conftest.py b/nbgrader/tests/apps/conftest.py deleted file mode 100644 index e1173224b..000000000 --- a/nbgrader/tests/apps/conftest.py +++ /dev/null @@ -1,134 +0,0 @@ -import os -import tempfile -import shutil -import pytest -import sys - -from textwrap import dedent - -from _pytest.fixtures import SubRequest - -from ...api import Gradebook -from ...utils import rmtree - - -@pytest.fixture -def db(request: SubRequest) -> str: - path = tempfile.mkdtemp(prefix='tmp-dbdir-') - dbpath = os.path.join(path, "nbgrader_test.db") - - def fin() -> None: - rmtree(path) - request.addfinalizer(fin) - - return "sqlite:///" + dbpath - - -@pytest.fixture -def course_dir(request: SubRequest) -> str: - path = tempfile.mkdtemp(prefix='tmp-coursedir-') - - def fin() -> None: - rmtree(path) - request.addfinalizer(fin) - - return path - - -@pytest.fixture -def temp_cwd(request: SubRequest, course_dir: str) -> str: - orig_dir = os.getcwd() - path = tempfile.mkdtemp(prefix='tmp-cwd-') - os.chdir(path) - - with open("nbgrader_config.py", "w") as fh: - fh.write(dedent( - """ - c = get_config() - c.CourseDirectory.root = r"{}" - """.format(course_dir) - )) - - def fin() -> None: - os.chdir(orig_dir) - rmtree(path) - request.addfinalizer(fin) - - return path - - -@pytest.fixture -def jupyter_config_dir(request): - path = tempfile.mkdtemp(prefix='tmp-configdir-') - - def fin(): - rmtree(path) - request.addfinalizer(fin) - - return path - - -@pytest.fixture -def jupyter_data_dir(request): - path = tempfile.mkdtemp(prefix='tmp-datadir-') - - def fin(): - rmtree(path) - request.addfinalizer(fin) - - return path - - -@pytest.fixture -def fake_home_dir(request, monkeypatch): - ''' - This fixture creates a temporary home directory. It prevents existing - nbgrader_config.py files in the user's home directory from interfering with the tests.
- ''' - path = tempfile.mkdtemp(prefix='tmp-homedir-') - - def fin(): - rmtree(path) - request.addfinalizer(fin) - - monkeypatch.setenv('HOME', str(path)) - - return path - - -@pytest.fixture -def env(request, jupyter_config_dir, jupyter_data_dir): - env = os.environ.copy() - env['JUPYTER_DATA_DIR'] = jupyter_data_dir - env['JUPYTER_CONFIG_DIR'] = jupyter_config_dir - return env - - -@pytest.fixture -def exchange(request): - path = tempfile.mkdtemp(prefix='tmp-exchange-') - - def fin(): - rmtree(path) - request.addfinalizer(fin) - - return path - - -@pytest.fixture -def cache(request): - path = tempfile.mkdtemp(prefix='tmp-cache-') - - def fin(): - rmtree(path) - request.addfinalizer(fin) - - return path - -notwindows = pytest.mark.skipif( - sys.platform == 'win32', - reason='This functionality of nbgrader is unsupported on Windows') - -windows = pytest.mark.skipif( - sys.platform != 'win32', - reason='This test is only to be run on Windows') diff --git a/nbgrader/tests/apps/files/__init__.py b/nbgrader/tests/apps/files/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/nbgrader/tests/apps/files/data.txt b/nbgrader/tests/apps/files/data.txt deleted file mode 100644 index 0f8a25a9d..000000000 --- a/nbgrader/tests/apps/files/data.txt +++ /dev/null @@ -1,4 +0,0 @@ -line 1 -line 2 -line 3 -line 4 \ No newline at end of file diff --git a/nbgrader/tests/apps/files/gradebook.db b/nbgrader/tests/apps/files/gradebook.db deleted file mode 100644 index a9a3ebbb4..000000000 Binary files a/nbgrader/tests/apps/files/gradebook.db and /dev/null differ diff --git a/nbgrader/tests/apps/files/infinite-loop-with-output.ipynb b/nbgrader/tests/apps/files/infinite-loop-with-output.ipynb deleted file mode 100644 index 44b2fe82b..000000000 --- a/nbgrader/tests/apps/files/infinite-loop-with-output.ipynb +++ /dev/null @@ -1,26 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "x = []\n", - "while True:\n", - " x.append(1)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} diff --git a/nbgrader/tests/apps/files/infinite-loop.ipynb b/nbgrader/tests/apps/files/infinite-loop.ipynb deleted file mode 100644 index b32eaf15d..000000000 --- a/nbgrader/tests/apps/files/infinite-loop.ipynb +++ /dev/null @@ -1,25 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "while True:\n", - " pass" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} diff --git a/nbgrader/tests/apps/files/jupyter.png b/nbgrader/tests/apps/files/jupyter.png deleted file mode 100644 index 9848cfb96..000000000 Binary files a/nbgrader/tests/apps/files/jupyter.png and /dev/null differ diff --git a/nbgrader/tests/apps/files/myexporter.py b/nbgrader/tests/apps/files/myexporter.py deleted file mode 100644 index 470606d05..000000000 --- a/nbgrader/tests/apps/files/myexporter.py +++ /dev/null @@ -1,6 +0,0 @@ -from nbgrader.plugins.export import ExportPlugin - -class MyExporter(ExportPlugin): - def export(self, gradebook): - with open(self.to, "w") as fh: - fh.write("hello!") diff --git a/nbgrader/tests/apps/files/notebooks.zip b/nbgrader/tests/apps/files/notebooks.zip deleted file mode 100644 index 
a77940f26..000000000 Binary files a/nbgrader/tests/apps/files/notebooks.zip and /dev/null differ diff --git a/nbgrader/tests/apps/files/open_relative_file.ipynb b/nbgrader/tests/apps/files/open_relative_file.ipynb deleted file mode 100644 index 2e5a8bbe0..000000000 --- a/nbgrader/tests/apps/files/open_relative_file.ipynb +++ /dev/null @@ -1,37 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": true, - "editable": true, - "nbgrader": { - "grade": true, - "grade_id": "open_file", - "locked": true, - "points": 10, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "with open(\"data.txt\", \"r\") as f:\n", - " data = f.read()\n", - " \n", - "assert len(data.split(\"\\n\")) == 4" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/side-effects.ipynb b/nbgrader/tests/apps/files/side-effects.ipynb deleted file mode 100644 index 24bea17fe..000000000 --- a/nbgrader/tests/apps/files/side-effects.ipynb +++ /dev/null @@ -1,25 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "with open(\"side-effect.txt\", \"w\") as fh:\n", - " fh.write(\"a side effect\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/submitted-changed.ipynb b/nbgrader/tests/apps/files/submitted-changed.ipynb deleted file mode 100644 index ffc9ef3fe..000000000 --- a/nbgrader/tests/apps/files/submitted-changed.ipynb +++ /dev/null @@ -1,157 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "3d6aac4236f8e1ec85380e692dcc51b1", - "grade": false, - "grade_id": "set_a", - "locked": false, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "# YOUR CODE HERE\n", - "a = 1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "8bb5c7c6f388fae724e5ef53dc4deeb2", - "grade": true, - "grade_id": "foo", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "print(\"Success!\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "75d78cdf605a339809ceaace462c5f33", - "grade": true, - "grade_id": "bar", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "assert a == 1" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": false, - "nbgrader": { - "cell_type": "markdown", - "checksum": "9e51fd0022c24c4105e38369d2f9d751", - "grade": true, - "grade_id": "baz", - "locked": false, - "points": 2.0, - "schema_version": 3, - "solution": true - } - }, - "source": [ - "An answer!" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "cell_type": "code", - "checksum": "5a193c164d7b444efe9a3612bee09f4c", - "grade": true, - "grade_id": "quux", - "locked": false, - "points": 3.0, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "# YOUR CODE HERE\n", - "b = 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "cell_type": "code", - "checksum": "86f5f877fe95faac003fcd4b8d43d093", - "grade": false, - "grade_id": "ro1", - "locked": true, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "print(\"Don't change this cell!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "cell_type": "markdown", - "checksum": "0122b50e5eaf367b9874d07ebaf80521", - "grade": false, - "grade_id": "ro2", - "locked": true, - "schema_version": 3, - "solution": false - } - }, - "source": [ - "This cell shouldn't be changed." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/submitted-cheat-attempt-alternative.ipynb b/nbgrader/tests/apps/files/submitted-cheat-attempt-alternative.ipynb deleted file mode 100644 index 1632063c0..000000000 --- a/nbgrader/tests/apps/files/submitted-cheat-attempt-alternative.ipynb +++ /dev/null @@ -1,158 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "3d6aac4236f8e1ec85380e692dcc51b1", - "grade": false, - "grade_id": "set_a", - "locked": false, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "# YOUR CODE HERE\n", - "import IPython.core.interactiveshell\n", - "IPython.core.interactiveshell.InteractiveShell.showtraceback = None" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "8bb5c7c6f388fae724e5ef53dc4deeb2", - "grade": true, - "grade_id": "foo", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "print(\"Success!\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "75d78cdf605a339809ceaace462c5f33", - "grade": true, - "grade_id": "bar", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "assert a == 1" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": false, - "nbgrader": { - "cell_type": "markdown", - "checksum": "9e51fd0022c24c4105e38369d2f9d751", - "grade": true, - "grade_id": "baz", - "locked": false, - "points": 2.0, - "schema_version": 3, - "solution": true - } - }, - "source": [ - "An answer!" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "cell_type": "code", - "checksum": "5a193c164d7b444efe9a3612bee09f4c", - "grade": true, - "grade_id": "quux", - "locked": false, - "points": 3.0, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "# YOUR CODE HERE\n", - "b = 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "cell_type": "code", - "checksum": "86f5f877fe95faac003fcd4b8d43d093", - "grade": false, - "grade_id": "ro1", - "locked": true, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "print(\"Don't change this cell!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "cell_type": "markdown", - "checksum": "0122b50e5eaf367b9874d07ebaf80521", - "grade": false, - "grade_id": "ro2", - "locked": true, - "schema_version": 3, - "solution": false - } - }, - "source": [ - "This cell shouldn't be changed." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/submitted-cheat-attempt.ipynb b/nbgrader/tests/apps/files/submitted-cheat-attempt.ipynb deleted file mode 100644 index 8564ca2c8..000000000 --- a/nbgrader/tests/apps/files/submitted-cheat-attempt.ipynb +++ /dev/null @@ -1,158 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "3d6aac4236f8e1ec85380e692dcc51b1", - "grade": false, - "grade_id": "set_a", - "locked": false, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "# YOUR CODE HERE\n", - "import IPython.core.interactiveshell\n", - "IPython.core.interactiveshell.InteractiveShell.showtraceback = lambda *args, **kwargs : None" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "8bb5c7c6f388fae724e5ef53dc4deeb2", - "grade": true, - "grade_id": "foo", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "print(\"Success!\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "75d78cdf605a339809ceaace462c5f33", - "grade": true, - "grade_id": "bar", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "assert a == 1" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": false, - "nbgrader": { - "cell_type": "markdown", - "checksum": "9e51fd0022c24c4105e38369d2f9d751", - "grade": true, - "grade_id": "baz", - "locked": false, - "points": 2.0, - "schema_version": 3, - "solution": true - } - }, - "source": [ - "An answer!" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "cell_type": "code", - "checksum": "5a193c164d7b444efe9a3612bee09f4c", - "grade": true, - "grade_id": "quux", - "locked": false, - "points": 3.0, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "# YOUR CODE HERE\n", - "b = 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "cell_type": "code", - "checksum": "86f5f877fe95faac003fcd4b8d43d093", - "grade": false, - "grade_id": "ro1", - "locked": true, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "print(\"Don't change this cell!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "cell_type": "markdown", - "checksum": "0122b50e5eaf367b9874d07ebaf80521", - "grade": false, - "grade_id": "ro2", - "locked": true, - "schema_version": 3, - "solution": false - } - }, - "source": [ - "This cell shouldn't be changed." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/submitted-grade-cell-changed.ipynb b/nbgrader/tests/apps/files/submitted-grade-cell-changed.ipynb deleted file mode 100644 index ae085eeb4..000000000 --- a/nbgrader/tests/apps/files/submitted-grade-cell-changed.ipynb +++ /dev/null @@ -1,157 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "3d6aac4236f8e1ec85380e692dcc51b1", - "grade": false, - "grade_id": "set_a", - "locked": false, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "# YOUR CODE HERE\n", - "raise NotImplementedError()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "8bb5c7c6f388fae724e5ef53dc4deeb2", - "grade": true, - "grade_id": "foo", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "print(\"Success!\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "75d78cdf605a339809ceaace462c5f33", - "grade": true, - "grade_id": "bar", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "#assert a == 1" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": false, - "nbgrader": { - "cell_type": "markdown", - "checksum": "9e51fd0022c24c4105e38369d2f9d751", - "grade": true, - "grade_id": "baz", - "locked": false, - "points": 2.0, - "schema_version": 3, - "solution": true - } - }, - "source": [ - "YOUR ANSWER HERE" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "cell_type": "code", - "checksum": "5a193c164d7b444efe9a3612bee09f4c", - "grade": true, - "grade_id": "quux", - "locked": false, - "points": 3.0, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "# YOUR CODE HERE\n", - "raise NotImplementedError()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": 
{ - "cell_type": "code", - "checksum": "86f5f877fe95faac003fcd4b8d43d093", - "grade": false, - "grade_id": "ro1", - "locked": true, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "print(\"Don't change this cell!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "cell_type": "markdown", - "checksum": "0122b50e5eaf367b9874d07ebaf80521", - "grade": false, - "grade_id": "ro2", - "locked": true, - "schema_version": 3, - "solution": false - } - }, - "source": [ - "This cell shouldn't be changed." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/submitted-locked-cell-changed.ipynb b/nbgrader/tests/apps/files/submitted-locked-cell-changed.ipynb deleted file mode 100644 index 7aa6621c4..000000000 --- a/nbgrader/tests/apps/files/submitted-locked-cell-changed.ipynb +++ /dev/null @@ -1,158 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "3d6aac4236f8e1ec85380e692dcc51b1", - "grade": false, - "grade_id": "set_a", - "locked": false, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "# YOUR CODE HERE\n", - "raise NotImplementedError()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "8bb5c7c6f388fae724e5ef53dc4deeb2", - "grade": true, - "grade_id": "foo", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "print(\"Success!\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "75d78cdf605a339809ceaace462c5f33", - "grade": true, - "grade_id": "bar", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "assert a == 1" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": false, - "nbgrader": { - "cell_type": "markdown", - "checksum": "9e51fd0022c24c4105e38369d2f9d751", - "grade": true, - "grade_id": "baz", - "locked": false, - "points": 2.0, - "schema_version": 3, - "solution": true - } - }, - "source": [ - "YOUR ANSWER HERE" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "cell_type": "code", - "checksum": "5a193c164d7b444efe9a3612bee09f4c", - "grade": true, - "grade_id": "quux", - "locked": false, - "points": 3.0, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "# YOUR CODE HERE\n", - "raise NotImplementedError()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "cell_type": "code", - "checksum": "86f5f877fe95faac003fcd4b8d43d093", - "grade": false, - "grade_id": "ro1", - "locked": true, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "#print(\"Don't change this cell!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "cell_type": "markdown", - "checksum": "0122b50e5eaf367b9874d07ebaf80521", - "grade": false, - "grade_id": "ro2", - "locked": true, - "schema_version": 3, - 
"solution": false - } - }, - "source": [ - "This cell shouldn't \n", - "be changed." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/submitted-unchanged.ipynb b/nbgrader/tests/apps/files/submitted-unchanged.ipynb deleted file mode 100644 index ce8770c58..000000000 --- a/nbgrader/tests/apps/files/submitted-unchanged.ipynb +++ /dev/null @@ -1,157 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "3d6aac4236f8e1ec85380e692dcc51b1", - "grade": false, - "grade_id": "set_a", - "locked": false, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "# YOUR CODE HERE\n", - "raise NotImplementedError()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "8bb5c7c6f388fae724e5ef53dc4deeb2", - "grade": true, - "grade_id": "foo", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "print(\"Success!\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "75d78cdf605a339809ceaace462c5f33", - "grade": true, - "grade_id": "bar", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "assert a == 1" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": false, - "nbgrader": { - "cell_type": "markdown", - "checksum": "9e51fd0022c24c4105e38369d2f9d751", - "grade": true, - "grade_id": "baz", - "locked": false, - "points": 2.0, - "schema_version": 3, - "solution": true - } - }, - "source": [ - "YOUR ANSWER HERE" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "cell_type": "code", - "checksum": "5a193c164d7b444efe9a3612bee09f4c", - "grade": true, - "grade_id": "quux", - "locked": false, - "points": 3.0, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "# YOUR CODE HERE\n", - "raise NotImplementedError()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "cell_type": "code", - "checksum": "86f5f877fe95faac003fcd4b8d43d093", - "grade": false, - "grade_id": "ro1", - "locked": true, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "print(\"Don't change this cell!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "cell_type": "markdown", - "checksum": "0122b50e5eaf367b9874d07ebaf80521", - "grade": false, - "grade_id": "ro2", - "locked": true, - "schema_version": 3, - "solution": false - } - }, - "source": [ - "This cell shouldn't be changed." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/test-hidden-tests.ipynb b/nbgrader/tests/apps/files/test-hidden-tests.ipynb deleted file mode 100644 index 7235d0546..000000000 --- a/nbgrader/tests/apps/files/test-hidden-tests.ipynb +++ /dev/null @@ -1,258 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "grade_id": "jupyter", - "locked": true, - "schema_version": 3, - "solution": false - } - }, - "source": [ - "For this problem set, we'll be using the Jupyter notebook:\n", - "\n", - "![](jupyter.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Part A (2 points)\n", - "\n", - "Write a function that returns a list of numbers, such that $x_i=i^2$, for $1\\leq i \\leq n$. Make sure it handles the case where $n<1$ by raising a `ValueError`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": false, - "grade_id": "squares", - "locked": false, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "def squares(n):\n", - " \"\"\"Compute the squares of numbers from 1 to n, such that the \n", - " ith element of the returned list equals i^2.\n", - " \n", - " \"\"\"\n", - " ### BEGIN SOLUTION\n", - " ### END SOLUTION\n", - " if n < 1:\n", - " raise ValueError(\"n must be greater than or equal to 1\")\n", - " if n == 1:\n", - " return [1]\n", - " if n == 2:\n", - " return [1, 4]\n", - " if n == 10:\n", - " return [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Your function should print `[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]` for $n=10$. 
Check that it does:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "correct_squares", - "locked": false, - "points": 1, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares returns the correct output for several inputs\"\"\"\n", - "assert squares(1) == [1]\n", - "assert squares(2) == [1, 4]\n", - "### BEGIN HIDDEN TESTS\n", - "assert squares(10) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]\n", - "assert squares(11) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]\n", - "### END HIDDEN TESTS" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "squares_invalid_input", - "locked": false, - "points": 1, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares raises an error for invalid inputs\"\"\"\n", - "try:\n", - " squares(0)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")\n", - "\n", - "try:\n", - " squares(-4)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "\n", - "## Part B (1 point)\n", - "\n", - "Using your `squares` function, write a function that computes the sum of the squares of the numbers from 1 to $n$. Your function should call the `squares` function -- it should NOT reimplement its functionality." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": false, - "grade_id": "sum_of_squares", - "locked": false, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "def sum_of_squares(n):\n", - " \"\"\"Compute the sum of the squares of numbers from 1 to n.\"\"\"\n", - " ### BEGIN SOLUTION\n", - " ### END SOLUTION\n", - " squares(10)\n", - " if n == 1:\n", - " return 1\n", - " if n == 2:\n", - " return 5\n", - " if n == 10:\n", - " return 385" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The sum of squares from 1 to 10 should be 385. 
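The expected value is easy to verify by hand: the sum of the first n squares has the closed form n(n+1)(2n+1)/6, and for n = 10 that gives 10 * 11 * 21 / 6 = 385. A one-line sanity check:

n = 10
assert sum(i ** 2 for i in range(1, n + 1)) == n * (n + 1) * (2 * n + 1) // 6 == 385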
Verify that this is the answer you get:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sum_of_squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "correct_sum_of_squares", - "locked": false, - "points": 0.5, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares returns the correct answer for various inputs.\"\"\"\n", - "assert sum_of_squares(1) == 1\n", - "assert sum_of_squares(2) == 5\n", - "### BEGIN HIDDEN TESTS\n", - "assert sum_of_squares(10) == 385\n", - "assert sum_of_squares(11) == 506\n", - "### END HIDDEN TESTS" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_uses_squares", - "locked": false, - "points": 0.5, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares relies on squares.\"\"\"\n", - "orig_squares = squares\n", - "del squares\n", - "try:\n", - " sum_of_squares(1)\n", - "except NameError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"sum_of_squares does not use squares\")\n", - "finally:\n", - " squares = orig_squares" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/test-no-metadata.ipynb b/nbgrader/tests/apps/files/test-no-metadata.ipynb deleted file mode 100644 index a76287555..000000000 --- a/nbgrader/tests/apps/files/test-no-metadata.ipynb +++ /dev/null @@ -1,229 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For this problem set, we'll be using the Jupyter notebook:\n", - "\n", - "![](jupyter.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Part A (2 points)\n", - "\n", - "Write a function that returns a list of numbers, such that $x_i=i^2$, for $1\\leq i \\leq n$. Make sure it handles the case where $n<1$ by raising a `ValueError`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def squares(n):\n", - " \"\"\"Compute the squares of numbers from 1 to n, such that the \n", - " ith element of the returned list equals i^2.\n", - " \n", - " \"\"\"\n", - " ### BEGIN SOLUTION\n", - " if n < 1:\n", - " raise ValueError(\"n must be greater than or equal to 1\")\n", - " return [i ** 2 for i in range(1, n + 1)]\n", - " ### END SOLUTION" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Your function should print `[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]` for $n=10$. 
Check that it does:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares returns the correct output for several inputs\"\"\"\n", - "assert squares(1) == [1]\n", - "assert squares(2) == [1, 4]\n", - "assert squares(10) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]\n", - "assert squares(11) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares raises an error for invalid inputs\"\"\"\n", - "try:\n", - " squares(0)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")\n", - "\n", - "try:\n", - " squares(-4)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "\n", - "## Part B (1 point)\n", - "\n", - "Using your `squares` function, write a function that computes the sum of the squares of the numbers from 1 to $n$. Your function should call the `squares` function -- it should NOT reimplement its functionality." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def sum_of_squares(n):\n", - " \"\"\"Compute the sum of the squares of numbers from 1 to n.\"\"\"\n", - " ### BEGIN SOLUTION\n", - " return sum(squares(n))\n", - " ### END SOLUTION" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The sum of squares from 1 to 10 should be 385. Verify that this is the answer you get:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sum_of_squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares returns the correct answer for various inputs.\"\"\"\n", - "assert sum_of_squares(1) == 1\n", - "assert sum_of_squares(2) == 5\n", - "assert sum_of_squares(10) == 385\n", - "assert sum_of_squares(11) == 506" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares relies on squares.\"\"\"\n", - "orig_squares = squares\n", - "del squares\n", - "try:\n", - " sum_of_squares(1)\n", - "except NameError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"sum_of_squares does not use squares\")\n", - "finally:\n", - " squares = orig_squares" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Part C (1 point)\n", - "\n", - "Using LaTeX math notation, write out the equation that is implemented by your `sum_of_squares` function." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$\\sum_{i=1}^n i^2$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Part D (2 points)\n", - "\n", - "Find a usecase for your `sum_of_squares` function and implement that usecase in the cell below." 
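The sample answer that follows uses square pyramidal numbers: the n-th pyramidal number counts the spheres in a square-based pyramid with n layers, which is exactly the sum of the first n squares. A self-contained sketch (the fixture's own cell delegates to sum_of_squares instead):

def pyramidal_number(n):
    """Return the n-th square pyramidal number, i.e. 1^2 + 2^2 + ... + n^2."""
    return sum(i ** 2 for i in range(1, n + 1))

assert [pyramidal_number(k) for k in (1, 2, 3, 4)] == [1, 5, 14, 30]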
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "def pyramidal_number(n):\n", - " \"\"\"Returns the n^th pyramidal number\"\"\"\n", - " return sum_of_squares(n)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/test-v0-invalid.ipynb b/nbgrader/tests/apps/files/test-v0-invalid.ipynb deleted file mode 100644 index 1eb94d2c3..000000000 --- a/nbgrader/tests/apps/files/test-v0-invalid.ipynb +++ /dev/null @@ -1,314 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "grade_id": "jupyter", - "locked": true, - "solution": false - } - }, - "source": [ - "For this problem set, we'll be using the Jupyter notebook:\n", - "\n", - "![](jupyter.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "solution": false - } - }, - "source": [ - "---\n", - "## Part A (2 points)\n", - "\n", - "Write a function that returns a list of numbers, such that $x_i=i^2$, for $1\\leq i \\leq n$. Make sure it handles the case where $n<1$ by raising a `ValueError`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade_id": "squares", - "solution": true - } - }, - "outputs": [], - "source": [ - "def squares(n):\n", - " \"\"\"Compute the squares of numbers from 1 to n, such that the \n", - " ith element of the returned list equals i^2.\n", - " \n", - " \"\"\"\n", - " ### BEGIN SOLUTION\n", - " if n < 1:\n", - " raise ValueError(\"n must be greater than or equal to 1\")\n", - " return [i ** 2 for i in range(1, n + 1)]\n", - " ### END SOLUTION" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "solution": false - } - }, - "source": [ - "Your function should print `[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]` for $n=10$. 
Check that it does:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": false, - "solution": false - } - }, - "outputs": [], - "source": [ - "squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "correct_squares", - "points": "1" - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares returns the correct output for several inputs\"\"\"\n", - "assert squares(1) == [1]\n", - "assert squares(2) == [1, 4]\n", - "assert squares(10) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]\n", - "assert squares(11) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "squares_invalid_input", - "points": "1" - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares raises an error for invalid inputs\"\"\"\n", - "try:\n", - " squares(0)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")\n", - "\n", - "try:\n", - " squares(-4)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "solution": false - } - }, - "source": [ - "---\n", - "\n", - "## Part B (1 point)\n", - "\n", - "Using your `squares` function, write a function that computes the sum of the squares of the numbers from 1 to $n$. Your function should call the `squares` function -- it should NOT reimplement its functionality." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade_id": "sum_of_squares", - "solution": true - } - }, - "outputs": [], - "source": [ - "def sum_of_squares(n):\n", - " \"\"\"Compute the sum of the squares of numbers from 1 to n.\"\"\"\n", - " ### BEGIN SOLUTION\n", - " return sum(squares(n))\n", - " ### END SOLUTION" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "solution": false - } - }, - "source": [ - "The sum of squares from 1 to 10 should be 385. 
Verify that this is the answer you get:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": false, - "solution": false - } - }, - "outputs": [], - "source": [ - "sum_of_squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "correct_sum_of_squares", - "points": 0.5 - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares returns the correct answer for various inputs.\"\"\"\n", - "assert sum_of_squares(1) == 1\n", - "assert sum_of_squares(2) == 5\n", - "assert sum_of_squares(10) == 385\n", - "assert sum_of_squares(11) == 506" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_uses_squares", - "points": 0.5 - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares relies on squares.\"\"\"\n", - "orig_squares = squares\n", - "del squares\n", - "try:\n", - " sum_of_squares(1)\n", - "except NameError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"sum_of_squares does not use squares\")\n", - "finally:\n", - " squares = orig_squares" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "solution": false - } - }, - "source": [ - "---\n", - "## Part C (1 point)\n", - "\n", - "Using LaTeX math notation, write out the equation that is implemented by your `sum_of_squares` function." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_equation", - "points": "1", - "solution": true - } - }, - "source": [ - "$\\sum_{i=1}^n i^2$" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "solution": false - } - }, - "source": [ - "---\n", - "## Part D (2 points)\n", - "\n", - "Find a usecase for your `sum_of_squares` function and implement that usecase in the cell below." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "grade": true, - "points": 2, - "solution": true - } - }, - "outputs": [], - "source": [ - "def pyramidal_number(n):\n", - " \"\"\"Returns the n^th pyramidal number\"\"\"\n", - " return sum_of_squares(n)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/test-v0.ipynb b/nbgrader/tests/apps/files/test-v0.ipynb deleted file mode 100644 index 8d9307e35..000000000 --- a/nbgrader/tests/apps/files/test-v0.ipynb +++ /dev/null @@ -1,315 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "grade_id": "jupyter", - "locked": true, - "solution": false - } - }, - "source": [ - "For this problem set, we'll be using the Jupyter notebook:\n", - "\n", - "![](jupyter.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "solution": false - } - }, - "source": [ - "---\n", - "## Part A (2 points)\n", - "\n", - "Write a function that returns a list of numbers, such that $x_i=i^2$, for $1\\leq i \\leq n$. Make sure it handles the case where $n<1$ by raising a `ValueError`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade_id": "squares", - "solution": true - } - }, - "outputs": [], - "source": [ - "def squares(n):\n", - " \"\"\"Compute the squares of numbers from 1 to n, such that the \n", - " ith element of the returned list equals i^2.\n", - " \n", - " \"\"\"\n", - " ### BEGIN SOLUTION\n", - " if n < 1:\n", - " raise ValueError(\"n must be greater than or equal to 1\")\n", - " return [i ** 2 for i in range(1, n + 1)]\n", - " ### END SOLUTION" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "solution": false - } - }, - "source": [ - "Your function should print `[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]` for $n=10$. Check that it does:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": false, - "solution": false - } - }, - "outputs": [], - "source": [ - "squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "correct_squares", - "points": "1" - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares returns the correct output for several inputs\"\"\"\n", - "assert squares(1) == [1]\n", - "assert squares(2) == [1, 4]\n", - "assert squares(10) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]\n", - "assert squares(11) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "squares_invalid_input", - "points": "1" - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares raises an error for invalid inputs\"\"\"\n", - "try:\n", - " squares(0)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")\n", - "\n", - "try:\n", - " squares(-4)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "solution": false - } - }, - "source": [ - "---\n", - "\n", - "## Part B (1 point)\n", - "\n", - "Using your `squares` function, write a function that computes the sum of the squares of the numbers from 1 to $n$. Your function should call the `squares` function -- it should NOT reimplement its functionality." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade_id": "sum_of_squares", - "solution": true - } - }, - "outputs": [], - "source": [ - "def sum_of_squares(n):\n", - " \"\"\"Compute the sum of the squares of numbers from 1 to n.\"\"\"\n", - " ### BEGIN SOLUTION\n", - " return sum(squares(n))\n", - " ### END SOLUTION" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "solution": false - } - }, - "source": [ - "The sum of squares from 1 to 10 should be 385. 
Verify that this is the answer you get:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": false, - "solution": false - } - }, - "outputs": [], - "source": [ - "sum_of_squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "correct_sum_of_squares", - "points": 0.5 - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares returns the correct answer for various inputs.\"\"\"\n", - "assert sum_of_squares(1) == 1\n", - "assert sum_of_squares(2) == 5\n", - "assert sum_of_squares(10) == 385\n", - "assert sum_of_squares(11) == 506" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_uses_squares", - "points": 0.5 - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares relies on squares.\"\"\"\n", - "orig_squares = squares\n", - "del squares\n", - "try:\n", - " sum_of_squares(1)\n", - "except NameError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"sum_of_squares does not use squares\")\n", - "finally:\n", - " squares = orig_squares" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "solution": false - } - }, - "source": [ - "---\n", - "## Part C (1 point)\n", - "\n", - "Using LaTeX math notation, write out the equation that is implemented by your `sum_of_squares` function." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_equation", - "points": "1", - "solution": true - } - }, - "source": [ - "$\\sum_{i=1}^n i^2$" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "solution": false - } - }, - "source": [ - "---\n", - "## Part D (2 points)\n", - "\n", - "Find a usecase for your `sum_of_squares` function and implement that usecase in the cell below." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_application", - "points": 2, - "solution": true - } - }, - "outputs": [], - "source": [ - "def pyramidal_number(n):\n", - " \"\"\"Returns the n^th pyramidal number\"\"\"\n", - " return sum_of_squares(n)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/test-v1.ipynb b/nbgrader/tests/apps/files/test-v1.ipynb deleted file mode 100644 index 0e7db80c9..000000000 --- a/nbgrader/tests/apps/files/test-v1.ipynb +++ /dev/null @@ -1,300 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "grade_id": "jupyter", - "locked": true, - "schema_version": 1, - "solution": false - } - }, - "source": [ - "For this problem set, we'll be using the Jupyter notebook:\n", - "\n", - "![](jupyter.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Part A (2 points)\n", - "\n", - "Write a function that returns a list of numbers, such that $x_i=i^2$, for $1\\leq i \\leq n$. Make sure it handles the case where $n<1$ by raising a `ValueError`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": false, - "grade_id": "squares", - "locked": false, - "schema_version": 1, - "solution": true - } - }, - "outputs": [], - "source": [ - "def squares(n):\n", - " \"\"\"Compute the squares of numbers from 1 to n, such that the \n", - " ith element of the returned list equals i^2.\n", - " \n", - " \"\"\"\n", - " ### BEGIN SOLUTION\n", - " if n < 1:\n", - " raise ValueError(\"n must be greater than or equal to 1\")\n", - " return [i ** 2 for i in range(1, n + 1)]\n", - " ### END SOLUTION" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Your function should print `[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]` for $n=10$. Check that it does:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "correct_squares", - "locked": false, - "points": 1.0, - "schema_version": 1, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares returns the correct output for several inputs\"\"\"\n", - "assert squares(1) == [1]\n", - "assert squares(2) == [1, 4]\n", - "assert squares(10) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]\n", - "assert squares(11) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "squares_invalid_input", - "locked": false, - "points": 1.0, - "schema_version": 1, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares raises an error for invalid inputs\"\"\"\n", - "try:\n", - " squares(0)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")\n", - "\n", - "try:\n", - " squares(-4)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "\n", - "## Part B (1 point)\n", - "\n", - "Using your `squares` function, write a function that computes the sum of the squares of the numbers from 1 to $n$. Your function should call the `squares` function -- it should NOT reimplement its functionality." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": false, - "grade_id": "sum_of_squares", - "locked": false, - "schema_version": 1, - "solution": true - } - }, - "outputs": [], - "source": [ - "def sum_of_squares(n):\n", - " \"\"\"Compute the sum of the squares of numbers from 1 to n.\"\"\"\n", - " ### BEGIN SOLUTION\n", - " return sum(squares(n))\n", - " ### END SOLUTION" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The sum of squares from 1 to 10 should be 385. 
Verify that this is the answer you get:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sum_of_squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "correct_sum_of_squares", - "locked": false, - "points": 0.5, - "schema_version": 1, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares returns the correct answer for various inputs.\"\"\"\n", - "assert sum_of_squares(1) == 1\n", - "assert sum_of_squares(2) == 5\n", - "assert sum_of_squares(10) == 385\n", - "assert sum_of_squares(11) == 506" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_uses_squares", - "locked": false, - "points": 0.5, - "schema_version": 1, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares relies on squares.\"\"\"\n", - "orig_squares = squares\n", - "del squares\n", - "try:\n", - " sum_of_squares(1)\n", - "except NameError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"sum_of_squares does not use squares\")\n", - "finally:\n", - " squares = orig_squares" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Part C (1 point)\n", - "\n", - "Using LaTeX math notation, write out the equation that is implemented by your `sum_of_squares` function." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_equation", - "locked": false, - "points": 1.0, - "schema_version": 1, - "solution": true - } - }, - "source": [ - "$\\sum_{i=1}^n i^2$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Part D (2 points)\n", - "\n", - "Find a usecase for your `sum_of_squares` function and implement that usecase in the cell below." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_application", - "locked": false, - "points": 2.0, - "schema_version": 1, - "solution": true - } - }, - "outputs": [], - "source": [ - "def pyramidal_number(n):\n", - " \"\"\"Returns the n^th pyramidal number\"\"\"\n", - " return sum_of_squares(n)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/test-v2.ipynb b/nbgrader/tests/apps/files/test-v2.ipynb deleted file mode 100644 index d126207e8..000000000 --- a/nbgrader/tests/apps/files/test-v2.ipynb +++ /dev/null @@ -1,300 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "grade_id": "jupyter", - "locked": true, - "schema_version": 2, - "solution": false - } - }, - "source": [ - "For this problem set, we'll be using the Jupyter notebook:\n", - "\n", - "![](jupyter.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Part A (2 points)\n", - "\n", - "Write a function that returns a list of numbers, such that $x_i=i^2$, for $1\\leq i \\leq n$. Make sure it handles the case where $n<1$ by raising a `ValueError`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": false, - "grade_id": "squares", - "locked": false, - "schema_version": 2, - "solution": true - } - }, - "outputs": [], - "source": [ - "def squares(n):\n", - " \"\"\"Compute the squares of numbers from 1 to n, such that the \n", - " ith element of the returned list equals i^2.\n", - " \n", - " \"\"\"\n", - " ### BEGIN SOLUTION\n", - " if n < 1:\n", - " raise ValueError(\"n must be greater than or equal to 1\")\n", - " return [i ** 2 for i in range(1, n + 1)]\n", - " ### END SOLUTION" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Your function should print `[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]` for $n=10$. Check that it does:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "correct_squares", - "locked": false, - "points": 1.0, - "schema_version": 2, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares returns the correct output for several inputs\"\"\"\n", - "assert squares(1) == [1]\n", - "assert squares(2) == [1, 4]\n", - "assert squares(10) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]\n", - "assert squares(11) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "squares_invalid_input", - "locked": false, - "points": 1.0, - "schema_version": 2, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares raises an error for invalid inputs\"\"\"\n", - "try:\n", - " squares(0)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")\n", - "\n", - "try:\n", - " squares(-4)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "\n", - "## Part B (1 point)\n", - "\n", - "Using your `squares` function, write a function that computes the sum of the squares of the numbers from 1 to $n$. Your function should call the `squares` function -- it should NOT reimplement its functionality." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": false, - "grade_id": "sum_of_squares", - "locked": false, - "schema_version": 2, - "solution": true - } - }, - "outputs": [], - "source": [ - "def sum_of_squares(n):\n", - " \"\"\"Compute the sum of the squares of numbers from 1 to n.\"\"\"\n", - " ### BEGIN SOLUTION\n", - " return sum(squares(n))\n", - " ### END SOLUTION" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The sum of squares from 1 to 10 should be 385. 
Verify that this is the answer you get:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sum_of_squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "correct_sum_of_squares", - "locked": false, - "points": 0.5, - "schema_version": 2, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares returns the correct answer for various inputs.\"\"\"\n", - "assert sum_of_squares(1) == 1\n", - "assert sum_of_squares(2) == 5\n", - "assert sum_of_squares(10) == 385\n", - "assert sum_of_squares(11) == 506" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_uses_squares", - "locked": false, - "points": 0.5, - "schema_version": 2, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares relies on squares.\"\"\"\n", - "orig_squares = squares\n", - "del squares\n", - "try:\n", - " sum_of_squares(1)\n", - "except NameError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"sum_of_squares does not use squares\")\n", - "finally:\n", - " squares = orig_squares" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Part C (1 point)\n", - "\n", - "Using LaTeX math notation, write out the equation that is implemented by your `sum_of_squares` function." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_equation", - "locked": false, - "points": 1.0, - "schema_version": 2, - "solution": true - } - }, - "source": [ - "$\\sum_{i=1}^n i^2$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Part D (2 points)\n", - "\n", - "Find a usecase for your `sum_of_squares` function and implement that usecase in the cell below." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_application", - "locked": false, - "points": 2.0, - "schema_version": 2, - "solution": true - } - }, - "outputs": [], - "source": [ - "def pyramidal_number(n):\n", - " \"\"\"Returns the n^th pyramidal number\"\"\"\n", - " return sum_of_squares(n)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/test-with-output.ipynb b/nbgrader/tests/apps/files/test-with-output.ipynb deleted file mode 100644 index a9cbf7cce..000000000 --- a/nbgrader/tests/apps/files/test-with-output.ipynb +++ /dev/null @@ -1,322 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": false, - "grade_id": "jupyter", - "locked": true, - "schema_version": 3, - "solution": false - } - }, - "source": [ - "For this problem set, we'll be using the Jupyter notebook:\n", - "\n", - "![](jupyter.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Part A (2 points)\n", - "\n", - "Write a function that returns a list of numbers, such that $x_i=i^2$, for $1\\leq i \\leq n$. Make sure it handles the case where $n<1$ by raising a `ValueError`." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": false, - "grade_id": "squares", - "locked": false, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "def squares(n):\n", - " \"\"\"Compute the squares of numbers from 1 to n, such that the \n", - " ith element of the returned list equals i^2.\n", - " \n", - " \"\"\"\n", - " ### BEGIN SOLUTION\n", - " if n < 1:\n", - " raise ValueError(\"n must be greater than or equal to 1\")\n", - " return [i ** 2 for i in range(1, n + 1)]\n", - " ### END SOLUTION" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Your function should print `[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]` for $n=10$. Check that it does:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "data": { - "text/plain": [ - "[1, 4, 9, 16, 25, 36, 49, 64, 81]" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "correct_squares", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares returns the correct output for several inputs\"\"\"\n", - "assert squares(1) == [1]\n", - "assert squares(2) == [1, 4]\n", - "assert squares(10) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]\n", - "assert squares(11) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "squares_invalid_input", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that squares raises an error for invalid inputs\"\"\"\n", - "try:\n", - " squares(0)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")\n", - "\n", - "try:\n", - " squares(-4)\n", - "except ValueError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"did not raise\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "\n", - "## Part B (1 point)\n", - "\n", - "Using your `squares` function, write a function that computes the sum of the squares of the numbers from 1 to $n$. Your function should call the `squares` function -- it should NOT reimplement its functionality." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": false, - "grade_id": "sum_of_squares", - "locked": false, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "def sum_of_squares(n):\n", - " \"\"\"Compute the sum of the squares of numbers from 1 to n.\"\"\"\n", - " ### BEGIN SOLUTION\n", - " return sum(squares(n))\n", - " ### END SOLUTION" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The sum of squares from 1 to 10 should be 385. 
Verify that this is the answer you get:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "data": { - "text/plain": [ - "385" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "sum_of_squares(10)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "correct_sum_of_squares", - "locked": false, - "points": 0.5, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares returns the correct answer for various inputs.\"\"\"\n", - "assert sum_of_squares(1) == 1\n", - "assert sum_of_squares(2) == 5\n", - "assert sum_of_squares(10) == 385\n", - "assert sum_of_squares(11) == 506" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "collapsed": false, - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_uses_squares", - "locked": false, - "points": 0.5, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "\"\"\"Check that sum_of_squares relies on squares.\"\"\"\n", - "orig_squares = squares\n", - "del squares\n", - "try:\n", - " sum_of_squares(1)\n", - "except NameError:\n", - " pass\n", - "else:\n", - " raise AssertionError(\"sum_of_squares does not use squares\")\n", - "finally:\n", - " squares = orig_squares" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Part C (1 point)\n", - "\n", - "Using LaTeX math notation, write out the equation that is implemented by your `sum_of_squares` function." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_equation", - "locked": false, - "points": 1.0, - "schema_version": 3, - "solution": true - } - }, - "source": [ - "$\\sum_{i=1}^n i^2$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## Part D (2 points)\n", - "\n", - "Find a usecase for your `sum_of_squares` function and implement that usecase in the cell below." 
- ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "collapsed": true, - "nbgrader": { - "grade": true, - "grade_id": "sum_of_squares_application", - "locked": false, - "points": 2.0, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "def pyramidal_number(n):\n", - " \"\"\"Returns the n^th pyramidal number\"\"\"\n", - " return sum_of_squares(n)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/nbgrader/tests/apps/files/timeout.ipynb b/nbgrader/tests/apps/files/timeout.ipynb deleted file mode 100644 index 1ac989b83..000000000 --- a/nbgrader/tests/apps/files/timeout.ipynb +++ /dev/null @@ -1,70 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "deletable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "cc6463a7b1770423bdf8bc8b01bee931", - "grade": false, - "grade_id": "squares", - "locked": false, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "import time\n", - "def foo():\n", - " time.sleep(5)\n", - " return True" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "deletable": false, - "editable": false, - "nbgrader": { - "cell_type": "code", - "checksum": "a6a51275965b572fa21c2f06be0fccff", - "grade": true, - "grade_id": "correct_squares", - "locked": false, - "points": 1, - "schema_version": 3, - "solution": false - } - }, - "outputs": [], - "source": [ - "assert foo()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/nbgrader/tests/apps/files/timestamp.txt b/nbgrader/tests/apps/files/timestamp.txt deleted file mode 100644 index 992edac9e..000000000 --- a/nbgrader/tests/apps/files/timestamp.txt +++ /dev/null @@ -1 +0,0 @@ -2019-05-30 11:44:01.911849 UTC \ No newline at end of file diff --git a/nbgrader/tests/apps/files/too-new.ipynb b/nbgrader/tests/apps/files/too-new.ipynb deleted file mode 100644 index 1df27f545..000000000 --- a/nbgrader/tests/apps/files/too-new.ipynb +++ /dev/null @@ -1,32 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "nbgrader": { - "grade": false, - "grade_id": "cell-2298c977f4deff2b", - "locked": true, - "points": 10, - "schema_version": 10, - "solution": false, - "task": true - } - }, - "outputs": [], - "source": [ - "# this has metadata which has a nbgrader schema that is too new!" 
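The point of this fixture is that a schema_version of 10 should make the tooling refuse the notebook outright rather than misread metadata it does not understand. A minimal sketch of that kind of guard, assuming (as the other fixtures here do) that 3 is the newest supported version; SUPPORTED_SCHEMA_VERSION and check_schema are hypothetical names for illustration, not nbgrader's actual API:

import nbformat

SUPPORTED_SCHEMA_VERSION = 3  # hypothetical constant for this sketch

def check_schema(path):
    # Reject any cell whose nbgrader schema is newer than we understand.
    nb = nbformat.read(path, as_version=4)
    for cell in nb.cells:
        meta = cell.get("metadata", {}).get("nbgrader", {})
        version = meta.get("schema_version", 0)
        if version > SUPPORTED_SCHEMA_VERSION:
            raise RuntimeError(
                "cell %r uses nbgrader schema v%d, newer than v%d"
                % (meta.get("grade_id"), version, SUPPORTED_SCHEMA_VERSION)
            )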
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python", - "language": "python", - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/nbgrader/tests/apps/files/validating-environment-variable.ipynb b/nbgrader/tests/apps/files/validating-environment-variable.ipynb deleted file mode 100644 index ebd35c03b..000000000 --- a/nbgrader/tests/apps/files/validating-environment-variable.ipynb +++ /dev/null @@ -1,55 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import os" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "nbgrader": { - "grade": true, - "grade_id": "cell-bfbdf1784663b6b6", - "locked": true, - "points": 1, - "schema_version": 3, - "solution": false, - "task": false - } - }, - "outputs": [], - "source": [ - "if os.getenv('NBGRADER_EXECUTION'):\n", - " assert False" - ] - } - ], - "metadata": { - "celltoolbar": "Create Assignment", - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.2" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} diff --git a/nbgrader/tests/apps/files/validation-zero-points.ipynb b/nbgrader/tests/apps/files/validation-zero-points.ipynb deleted file mode 100644 index 3fc7a8a1a..000000000 --- a/nbgrader/tests/apps/files/validation-zero-points.ipynb +++ /dev/null @@ -1,75 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Validating this file should fail even though the assignment has 0 points." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "deletable": false, - "nbgrader": { - "checksum": "8f1eab8d02a9520920aa06f8a86a2492", - "grade": false, - "grade_id": "squares", - "locked": false, - "schema_version": 3, - "solution": true - } - }, - "outputs": [], - "source": [ - "def squares(n):\n", - " # YOUR CODE HERE\n", - " return [0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "deletable": false, - "editable": false, - "nbgrader": { - "grade": true, - "grade_id": "correct_squares", - "locked": true, - "points": 0, - "schema_version": 3, - "solution": false, - "task": false - }, - "tags": [] - }, - "outputs": [], - "source": [ - "assert squares(1) == [1]" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/nbgrader/tests/apps/test_api.py b/nbgrader/tests/apps/test_api.py deleted file mode 100644 index b838dbb52..000000000 --- a/nbgrader/tests/apps/test_api.py +++ /dev/null @@ -1,818 +0,0 @@ -import pytest -import sys -import os -import shutil -import filecmp - -from os.path import join -from traitlets.config import Config -from datetime import datetime - -from ...apps.api import NbGraderAPI -from ...coursedir import CourseDirectory -from ...utils import rmtree, get_username, parse_utc -from .. 
import run_nbgrader -from .base import BaseTestApp -from .conftest import notwindows, windows - - -@pytest.fixture -def api(request, course_dir, db, exchange, cache): - config = Config() - config.CourseDirectory.course_id = "abc101" - config.Exchange.root = exchange - config.Exchange.cache = cache - config.CourseDirectory.root = course_dir - config.CourseDirectory.db_url = db - - coursedir = CourseDirectory(config=config) - api = NbGraderAPI(coursedir, config=config) - - return api - - -class TestNbGraderAPI(BaseTestApp): - - if sys.platform == 'win32': - tz = "Coordinated Universal Time" - else: - tz = "UTC" - - def test_get_source_assignments(self, api, course_dir): - assert api.get_source_assignments() == set([]) - - self._empty_notebook(join(course_dir, "source", "ps1", "problem1.ipynb")) - self._empty_notebook(join(course_dir, "source", "ps2", "problem1.ipynb")) - self._make_file(join(course_dir, "source", "blah")) - assert api.get_source_assignments() == {"ps1", "ps2"} - - @notwindows - def test_get_released_assignments(self, api, exchange, course_dir): - assert api.get_released_assignments() == set([]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb")) - run_nbgrader(["release_assignment", "ps1", "--course", "abc101", "--Exchange.root={}".format(exchange)]) - assert api.get_released_assignments() == {"ps1"} - - api.course_id = None - assert api.get_released_assignments() == set([]) - - @windows - def test_get_released_assignments_windows(self, api, exchange, course_dir): - assert api.get_released_assignments() == set([]) - - api.course_id = 'abc101' - assert api.get_released_assignments() == set([]) - - def test_get_submitted_students(self, api, course_dir): - assert api.get_submitted_students("ps1") == set([]) - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "problem1.ipynb")) - self._empty_notebook(join(course_dir, "submitted", "bar", "ps1", "problem1.ipynb")) - self._make_file(join(course_dir, "submitted", "blah")) - assert api.get_submitted_students("ps1") == {"foo", "bar"} - assert api.get_submitted_students("*") == {"foo", "bar"} - - def test_get_submitted_timestamp(self, api, course_dir): - assert api.get_submitted_timestamp("ps1", "foo") is None - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "problem1.ipynb")) - assert api.get_submitted_timestamp("ps1", "foo") is None - - timestamp = datetime.now() - self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat()) - assert api.get_submitted_timestamp("ps1", "foo") == timestamp - - def test_get_autograded_students(self, api, course_dir, db): - self._empty_notebook(join(course_dir, "source", "ps1", "problem1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - # submitted and autograded exist, but not in the database - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "problem1.ipynb")) - timestamp = datetime.now() - self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat()) - self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "problem1.ipynb")) - self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat()) - assert api.get_autograded_students("ps1") == set([]) - - # run autograde so things are consistent - run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db]) - assert api.get_autograded_students("ps1") == {"foo"} - - # updated 
submission
-        timestamp = datetime.now()
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat())
-        assert api.get_autograded_students("ps1") == set([])
-
-    def test_get_autograded_students_no_timestamps(self, api, course_dir, db):
-        self._empty_notebook(join(course_dir, "source", "ps1", "problem1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        # submitted and autograded exist, but not in the database
-        self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "problem1.ipynb"))
-        self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "problem1.ipynb"))
-        assert api.get_autograded_students("ps1") == set([])
-
-        # run autograde so things are consistent
-        run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
-        assert api.get_autograded_students("ps1") == {"foo"}
-
-        # updated submission
-        timestamp = datetime.now()
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat())
-        assert api.get_autograded_students("ps1") == set([])
-
-    def test_get_assignment(self, api, course_dir, db, exchange):
-        keys = set([
-            'average_code_score', 'average_score', 'average_written_score',
-            'duedate', 'name', 'num_submissions', 'release_path', 'releaseable',
-            'source_path', 'status', 'id', 'max_code_score', 'max_score',
-            'max_written_score', 'display_duedate', 'duedate_timezone',
-            'duedate_notimezone',
-            'max_task_score', 'average_task_score'])
-
-        default = {
-            "average_code_score": 0,
-            "average_score": 0,
-            "average_written_score": 0,
-            "average_task_score": 0,
-            "duedate": None,
-            "display_duedate": None,
-            "duedate_timezone": "+0000",
-            "duedate_notimezone": None,
-            "name": "ps1",
-            "num_submissions": 0,
-            "release_path": None,
-            "releaseable": True if sys.platform != 'win32' else False,
-            "source_path": join("source", "ps1"),
-            "status": "draft",
-            "id": None,
-            "max_code_score": 0,
-            "max_score": 0,
-            "max_written_score": 0,
-            "max_task_score": 0
-        }
-
-        # check that return value is None when there is no assignment
-        a = api.get_assignment("ps1")
-        assert a is None
-
-        # check the values when the source assignment exists, but hasn't been
-        # released yet
-        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        a = api.get_assignment("ps1")
-        assert set(a.keys()) == keys
-        target = default.copy()
-        assert a == target
-
-        # check that it is not releasable if the course id isn't set
-        api.course_id = None
-        a = api.get_assignment("ps1")
-        assert set(a.keys()) == keys
-        target = default.copy()
-        target["releaseable"] = False
-        assert a == target
-
-        # check the values once the student version of the assignment has been created
-        api.course_id = "abc101"
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-        a = api.get_assignment("ps1")
-        assert set(a.keys()) == keys
-        target = default.copy()
-        target["release_path"] = join("release", "ps1")
-        target["id"] = a["id"]
-        target["max_code_score"] = 5
-        target["max_score"] = 6
-        target["max_written_score"] = 1
-        target["max_task_score"] = 1
-        assert a == target
-
-        # check that timestamps are handled correctly
-        with api.gradebook as gb:
-            assignment = gb.find_assignment("ps1")
-            assignment.duedate = parse_utc("2017-07-05 12:22:08 UTC")
-            gb.db.commit()
-
-        a = api.get_assignment("ps1")
-        default["duedate"] = "2017-07-05T12:22:08"
-        default["display_duedate"] = "2017-07-05 12:22:08 {}".format(self.tz)
-        default["duedate_notimezone"] = "2017-07-05T12:22:08"
-        assert a["duedate"] == default["duedate"]
-        assert a["display_duedate"] == default["display_duedate"]
-        assert a["duedate_notimezone"] == default["duedate_notimezone"]
-        assert a["duedate_timezone"] == default["duedate_timezone"]
-
-        # check the values once the assignment has been released and unreleased
-        if sys.platform != "win32":
-            run_nbgrader(["release_assignment", "ps1", "--course", "abc101", "--Exchange.root={}".format(exchange)])
-            a = api.get_assignment("ps1")
-            assert set(a.keys()) == keys
-            target = default.copy()
-            target["release_path"] = join("release", "ps1")
-            target["id"] = a["id"]
-            target["max_code_score"] = 5
-            target["max_score"] = 6
-            target["max_written_score"] = 1
-            target["max_task_score"] = 1
-            target["releaseable"] = True
-            target["status"] = "released"
-            assert a == target
-
-            run_nbgrader(["list", "ps1", "--course", "abc101", "--Exchange.root={}".format(exchange), "--remove"])
-            a = api.get_assignment("ps1")
-            assert set(a.keys()) == keys
-            target = default.copy()
-            target["release_path"] = join("release", "ps1")
-            target["id"] = a["id"]
-            target["max_code_score"] = 5
-            target["max_score"] = 6
-            target["max_written_score"] = 1
-            target["max_task_score"] = 1
-            assert a == target
-
-        # check the values once there are submissions as well
-        self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "problem1.ipynb"))
-        self._empty_notebook(join(course_dir, "submitted", "bar", "ps1", "problem1.ipynb"))
-        a = api.get_assignment("ps1")
-        assert set(a.keys()) == keys
-        target = default.copy()
-        target["release_path"] = join("release", "ps1")
-        target["id"] = a["id"]
-        target["max_code_score"] = 5
-        target["max_score"] = 6
-        target["max_written_score"] = 1
-        target["max_task_score"] = 1
-        target["num_submissions"] = 2
-        assert a == target
-
-    def test_get_assignments(self, api, course_dir):
-        assert api.get_assignments() == []
-
-        self._empty_notebook(join(course_dir, "source", "ps1", "problem1.ipynb"))
-        self._empty_notebook(join(course_dir, "source", "ps2", "problem1.ipynb"))
-        a = api.get_assignments()
-        assert len(a) == 2
-        assert a[0] == api.get_assignment("ps1")
-        assert a[1] == api.get_assignment("ps2")
-
-    def test_get_notebooks(self, api, course_dir, db):
-        keys = set([
-            'average_code_score', 'average_score', 'average_written_score',
-            'name', 'id', 'max_code_score', 'max_score', 'max_written_score',
-            'max_task_score', 'average_task_score',
-            'needs_manual_grade', 'num_submissions'])
-
-        default = {
-            "name": "p1",
-            "id": None,
-            "average_code_score": 0,
-            "max_code_score": 0,
-            "average_score": 0,
-            "max_score": 0,
-            "average_written_score": 0,
-            "max_written_score": 0,
-            "average_task_score": 0,
-            "max_task_score": 0,
-            "needs_manual_grade": False,
-            "num_submissions": 0
-        }
-
-        # check that return value is None when there is no assignment
-        n = api.get_notebooks("ps1")
-        assert n == []
-
-        # check values before nbgrader generate_assignment is run
-        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        n1, = api.get_notebooks("ps1")
-        assert set(n1.keys()) == keys
-        assert n1 == default.copy()
-
-        # add it to the database (but don't assign yet)
-        with api.gradebook as gb:
-            gb.update_or_create_assignment("ps1")
-        n1, = api.get_notebooks("ps1")
-        assert set(n1.keys()) == keys
-        assert n1 == default.copy()
-
-        # check values after nbgrader generate_assignment is run
-        run_nbgrader(["generate_assignment", "ps1", "--db", db, "--force"])
-        n1, = api.get_notebooks("ps1")
-        assert set(n1.keys()) == keys
-        target = default.copy()
-        target["id"] = n1["id"]
-        target["max_code_score"] = 5
-        target["max_score"] = 6
-        target["max_written_score"] = 1
-        assert n1 == target
-
-    def test_get_submission(self, api, course_dir, db):
-        keys = set([
-            "id", "name", "student", "last_name", "first_name", "score",
-            "max_score", "code_score", "max_code_score", "written_score",
-            "max_written_score", "task_score", "max_task_score", "needs_manual_grade", "autograded",
-            "timestamp", "submitted", "display_timestamp"])
-
-        default = {
-            "id": None,
-            "name": "ps1",
-            "student": "foo",
-            "last_name": None,
-            "first_name": None,
-            "score": 0,
-            "max_score": 0,
-            "code_score": 0,
-            "max_code_score": 0,
-            "written_score": 0,
-            "max_written_score": 0,
-            "task_score": 0,
-            "max_task_score": 0,
-            "needs_manual_grade": False,
-            "autograded": False,
-            "timestamp": None,
-            "display_timestamp": None,
-            "submitted": False
-        }
-
-        s = api.get_submission("ps1", "foo")
-        assert s == default.copy()
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents="2017-07-05T12:32:56.123456")
-        s = api.get_submission("ps1", "foo")
-        assert set(s.keys()) == keys
-        target = default.copy()
-        target["submitted"] = True
-        target["timestamp"] = "2017-07-05T12:32:56.123456"
-        target["display_timestamp"] = "2017-07-05 12:32:56 {}".format(self.tz)
-        assert s == target
-
-        run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
-        s = api.get_submission("ps1", "foo")
-        target = default.copy()
-        target["id"] = s["id"]
-        target["autograded"] = True
-        target["submitted"] = True
-        target["timestamp"] = "2017-07-05T12:32:56.123456"
-        target["display_timestamp"] = "2017-07-05 12:32:56 {}".format(self.tz)
-        target["code_score"] = 2
-        target["max_code_score"] = 5
-        target["score"] = 2
-        target["max_score"] = 7
-        target["written_score"] = 0
-        target["max_written_score"] = 2
-        target["needs_manual_grade"] = True
-        assert s == target
-
-    def test_get_submission_no_timestamp(self, api, course_dir, db):
-        keys = set([
-            "id", "name", "student", "last_name", "first_name", "score",
-            "max_score", "code_score", "max_code_score", "written_score",
-            "max_written_score", "task_score", "max_task_score", "needs_manual_grade", "autograded",
-            "timestamp", "submitted", "display_timestamp"])
-
-        default = {
-            "id": None,
-            "name": "ps1",
-            "student": "foo",
-            "last_name": None,
-            "first_name": None,
-            "score": 0,
-            "max_score": 0,
-            "code_score": 0,
-            "max_code_score": 0,
-            "written_score": 0,
-            "max_written_score": 0,
-            "task_score": 0,
-            "max_task_score": 0,
-            "needs_manual_grade": False,
-            "autograded": False,
-            "timestamp": None,
-            "display_timestamp": None,
-            "submitted": False
-        }
-
-        s = api.get_submission("ps1", "foo")
-        assert s == default.copy()
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        s = api.get_submission("ps1", "foo")
-        assert set(s.keys()) == keys
-        target = default.copy()
-        target["submitted"] = True
-        assert s == target
-
-        run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
-        s = api.get_submission("ps1", "foo")
-        target = default.copy()
-        target["id"] = s["id"]
-        target["autograded"] = True
-        target["submitted"] = True
-        target["code_score"] = 2
-        target["max_code_score"] = 5
-        target["score"] = 2
-        target["max_score"] = 7
-        target["written_score"] = 0
-        target["max_written_score"] = 2
-        target["needs_manual_grade"] = True
-        assert s == target
-
-    def test_get_submissions(self, api, course_dir, db):
-        assert api.get_submissions("ps1") == []
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        timestamp = datetime.now()
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat())
-        s1, = api.get_submissions("ps1")
-        assert s1 == api.get_submission("ps1", "foo")
-
-        run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
-        s1, = api.get_submissions("ps1")
-        assert s1 == api.get_submission("ps1", "foo")
-
-    def test_filter_existing_notebooks(self, api, course_dir, db):
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
-
-        with api.gradebook as gb:
-            notebooks = gb.notebook_submissions("p1", "ps1")
-            s = api._filter_existing_notebooks("ps1", notebooks)
-            assert s == notebooks
-
-            notebooks = gb.notebook_submissions("p2", "ps1")
-            s = api._filter_existing_notebooks("ps1", notebooks)
-            assert s == []
-
-    @notwindows
-    def test_filter_existing_notebooks_strict(self, api, course_dir, db):
-        api.config.ExchangeSubmit.strict = True
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
-
-        with api.gradebook as gb:
-            notebooks = gb.notebook_submissions("p1", "ps1")
-            s = api._filter_existing_notebooks("ps1", notebooks)
-            assert s == notebooks
-
-            notebooks = gb.notebook_submissions("p2", "ps1")
-            s = api._filter_existing_notebooks("ps1", notebooks)
-            assert s == notebooks
-
-    def test_get_notebook_submission_indices(self, api, course_dir, db):
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
-        run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
-
-        with api.gradebook as gb:
-            notebooks = gb.notebook_submissions("p1", "ps1")
-            notebooks.sort(key=lambda x: x.id)
-            idx = api.get_notebook_submission_indices("ps1", "p1")
-            assert idx[notebooks[0].id] == 0
-            assert idx[notebooks[1].id] == 1
-
-    def test_get_notebook_submissions(self, api, course_dir, db):
-        assert api.get_notebook_submissions("ps1", "p1") == []
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
-        run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "baz", "ps1", "p1.ipynb"))
-
-        s = api.get_notebook_submissions("ps1", "p1")
-        assert len(s) == 2
-        with api.gradebook as gb:
-            notebooks = gb.notebook_submissions("p1", "ps1")
-            notebooks.sort(key=lambda x: x.id)
-            notebooks = [x.to_dict() for x in notebooks]
-            for i in range(2):
-                notebooks[i]["index"] = i
-                assert s[i] == notebooks[i]
-
-    def test_get_student(self, api, course_dir, db):
-        assert api.get_student("foo") is None
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        assert api.get_student("foo") == {
-            "id": "foo",
-            "last_name": None,
-            "first_name": None,
-            "email": None,
-            "lms_user_id": None,
-            "max_score": 0,
-            "score": 0
-        }
-        rmtree(join(course_dir, "submitted", "foo"))
-
-        with api.gradebook as gb:
-            gb.add_student("foo")
-            assert api.get_student("foo") == {
-                "id": "foo",
-                "last_name": None,
-                "first_name": None,
-                "email": None,
-                "lms_user_id": None,
-                "max_score": 0,
-                "score": 0
-            }
-
-            gb.update_or_create_student("foo", last_name="Foo", first_name="A", email="a.foo@email.com", lms_user_id="230")
-            assert api.get_student("foo") == {
-                "id": "foo",
-                "last_name": "Foo",
-                "first_name": "A",
-                "email": "a.foo@email.com",
-                "lms_user_id": "230",
-                "max_score": 0,
-                "score": 0
-            }
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
-        assert api.get_student("foo") == {
-            "id": "foo",
-            "last_name": "Foo",
-            "first_name": "A",
-            "email": "a.foo@email.com",
-            "lms_user_id": "230",
-            "max_score": 7,
-            "score": 2
-        }
-
-    def test_get_students(self, api, course_dir):
-        assert api.get_students() == []
-
-        with api.gradebook as gb:
-            gb.update_or_create_student("foo", last_name="Foo", first_name="A", email="a.foo@email.com", lms_user_id=None)
-        s1 = {
-            "id": "foo",
-            "last_name": "Foo",
-            "first_name": "A",
-            "email": "a.foo@email.com",
-            "lms_user_id": None,
-            "max_score": 0,
-            "score": 0
-        }
-        assert api.get_students() == [s1]
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
-        s2 = {
-            "id": "bar",
-            "last_name": None,
-            "first_name": None,
-            "email": None,
-            "lms_user_id": None,
-            "max_score": 0,
-            "score": 0
-        }
-        assert api.get_students() == [s1, s2]
-
-    def test_get_student_submissions(self, api, course_dir, db):
-        assert api.get_student_submissions("foo") == []
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        timestamp = datetime.now()
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat())
-        run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
-
-        assert api.get_student_submissions("foo") == [api.get_submission("ps1", "foo")]
-
-    def test_get_student_notebook_submissions(self, api, course_dir, db):
-        assert api.get_student_notebook_submissions("foo", "ps1") == []
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
-
-        s_p1, s_p2 = api.get_student_notebook_submissions("foo", "ps1")
-        p1, = api.get_notebook_submissions("ps1", "p1")
-        del p1["index"]
-        assert s_p1 == p1
-        assert s_p2 == {
-            "id": None,
-            "name": "p2",
-            "student": "foo",
-            "last_name": None,
-            "first_name": None,
-            "score": 0,
-            "max_score": 7,
-            "code_score": 0,
-            "max_code_score": 5,
-            "written_score": 0,
-            "max_written_score": 2,
-            "task_score": 0,
-            "max_task_score": 0,
-            "needs_manual_grade": False,
-            "failed_tests": False,
-            "flagged": False
-        }
-
-    def test_deprecation(self, api, course_dir, db):
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        result = api.generate_assignment("ps1")
-        assert result["success"]
-        assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb"))
-
-        os.makedirs(join(course_dir, "source", "ps2"))
-        result = api.assign("ps2")
-        assert not result["success"]
-
-    def test_generate_assignment(self, api, course_dir, db):
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        result = api.generate_assignment("ps1")
-        assert result["success"]
-        assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb"))
-
-        os.makedirs(join(course_dir, "source", "ps2"))
-        result = api.generate_assignment("ps2")
-        assert not result["success"]
-
-    @notwindows
-    def test_release_deprecated(self, api, course_dir, db, exchange):
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        result = api.generate_assignment("ps1")
-        result = api.release("ps1")
-        assert result["success"]
-        assert os.path.exists(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
-
-    @notwindows
-    def test_release_and_unrelease(self, api, course_dir, db, exchange):
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        result = api.generate_assignment("ps1")
-        result = api.release_assignment("ps1")
-        assert result["success"]
-        assert os.path.exists(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
-
-        result = api.release_assignment("ps1")
-        assert not result["success"]
-
-        result = api.unrelease("ps1")
-        assert result["success"]
-        assert not os.path.exists(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
-
-    @notwindows
-    def test_collect(self, api, course_dir, db, exchange):
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        result = api.generate_assignment("ps1")
-        result = api.release_assignment("ps1")
-        result = api.collect("ps1")
-        assert result["success"]
-        assert "No submissions" in result["log"]
-
-        run_nbgrader(["fetch_assignment", "ps1", "--course", "abc101", "--Exchange.root={}".format(exchange)])
-        run_nbgrader(["submit", "ps1", "--course", "abc101", "--Exchange.root={}".format(exchange)])
-        username = get_username()
-        result = api.collect("ps1")
-        assert result["success"]
-        assert "Collecting submission" in result["log"]
-        assert os.path.exists(join(course_dir, "submitted", username, "ps1", "p1.ipynb"))
-
-        run_nbgrader(["submit", "ps1", "--course", "abc101", "--Exchange.root={}".format(exchange)])
-        result = api.collect("ps1")
-        assert result["success"]
-        assert "Updating submission" in result["log"]
-        assert os.path.exists(join(course_dir, "submitted", username, "ps1", "p1.ipynb"))
-
-    @notwindows
-    def test_autograde(self, api, course_dir, db):
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        api.generate_assignment("ps1")
-
-        result = api.autograde("ps1", "foo")
-        assert not result["success"]
-        assert "No notebooks were matched" in result["log"]
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        result = api.autograde("ps1", "foo")
-        assert result["success"]
-        assert os.path.exists(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-
-        result = api.autograde("ps1", "foo")
-        assert result["success"]
-
-    def test_generate_feedback(self, api, course_dir, db):
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        api.generate_assignment("ps1")
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        api.autograde("ps1", "foo")
-
-        result = api.generate_feedback("ps1", "foo")
-        assert result["success"]
-        assert os.path.exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
-        contents = open(join(course_dir, "feedback", "foo", "ps1", "p1.html"), "r").read()
-
-        # update the grade
-        with api.gradebook as gb:
-            nb = gb.find_submission_notebook("p1", "ps1", "foo")
-            nb.grades[0].manual_score = 123
-            gb.db.commit()
-
-        # contents shouldn't have changed, because force=False
-        result = api.generate_feedback("ps1", "foo", force=False)
-        assert result["success"]
-        assert os.path.exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
-        new_contents = open(join(course_dir, "feedback", "foo", "ps1", "p1.html"), "r").read()
-        assert new_contents == contents
-
-        # contents should now have changed, because force=True
-        result = api.generate_feedback("ps1", "foo", force=True)
-        assert result["success"]
-        assert os.path.exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
-        new_contents = open(join(course_dir, "feedback", "foo", "ps1", "p1.html"), "r").read()
-        assert new_contents != contents
-
-        # should not work for an empty submission
-        os.makedirs(join(course_dir, "submitted", "foo", "ps2"))
-        result = api.generate_feedback("ps2", "foo")
-        assert not result["success"]
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps2", "p2.ipynb"))
-        api.generate_assignment("ps2")
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps2", "p2.ipynb"))
-        api.autograde("ps2", "foo")
-        result = api.generate_feedback("ps2", "foo")
-        assert result["success"]
-
-    @notwindows
-    def test_release_feedback(self, api, course_dir, db, exchange):
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        api.generate_assignment("ps1")
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"))
-        api.autograde("ps1", "foo")
-        api.generate_feedback("ps1", "foo")
-        result = api.release_feedback("ps1", "foo")
-        assert result["success"]
-        assert os.path.isdir(join(exchange, "abc101", "feedback"))
-        assert os.path.exists(join(exchange, "abc101", "feedback", "c600ef68c434c3d136bb5e68ea874169.html"))
-        # add another assignment
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps2", "p2.ipynb"))
-        api.generate_assignment("ps2")
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps2", "p2.ipynb"))
-        self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "foo", "ps2", "timestamp.txt"))
-        api.autograde("ps2", "foo")
-        api.generate_feedback("ps2", "foo")
-        result = api.release_feedback("ps2", "foo")
-        assert result["success"]
-        assert os.path.exists(join(exchange, "abc101", "feedback", "e190e1f234b633832f2069f4f8a3a680.html"))
-
-    @notwindows
-    def test_fetch_feedback(self, api, course_dir, db, cache):
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        api.generate_assignment("ps1")
-        timestamp = open(os.path.join(os.path.dirname(__file__), "files", "timestamp.txt")).read()
-        cachepath = join(cache, "abc101", "foo+ps1+{}".format(timestamp))
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(cachepath, "p1.ipynb"))
-        self._copy_file(join("files", "timestamp.txt"), join(cachepath, "timestamp.txt"))
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"))
-        api.autograde("ps1", "foo")
-        api.generate_feedback("ps1", "foo")
-        api.release_feedback("ps1", "foo")
-        result = api.fetch_feedback("ps1", "foo")
-        assert result["success"]
-        assert os.path.isdir(join("ps1", "feedback"))
-        assert os.path.exists(join("ps1", "feedback", timestamp, "p1.html"))
-        # add another assignment
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps2", "p2.ipynb"))
-        api.generate_assignment("ps2")
-        cachepath = join(cache, "abc101", "foo+ps2+{}".format(timestamp))
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(cachepath, "p2.ipynb"))
-        self._copy_file(join("files", "timestamp.txt"), join(cachepath, "timestamp.txt"))
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps2", "p2.ipynb"))
-        self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "foo", "ps2", "timestamp.txt"))
-        api.autograde("ps2", "foo")
-        api.generate_feedback("ps2", "foo")
-        api.release_feedback("ps2", "foo")
-        result = api.fetch_feedback("ps2", "foo")
-        assert result["success"]
-        assert os.path.exists(join("ps2", "feedback", timestamp, "p2.html"))
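The API tests above exercise nbgrader's high-level Python API end to end. For orientation, that workflow looks roughly like the following minimal sketch; the module path nbgrader.apps.api and the constructor call are assumptions inferred from these tests, and /path/to/course is a placeholder:

    from nbgrader.apps.api import NbGraderAPI
    from nbgrader.coursedir import CourseDirectory

    # Point the API at a course directory (hypothetical path); course_id is
    # needed for exchange operations such as release and collect.
    api = NbGraderAPI(coursedir=CourseDirectory(root="/path/to/course"))
    api.course_id = "abc101"

    # Each call returns a dict with "success" and "log" keys, which the
    # assertions in the tests above rely on.
    result = api.generate_assignment("ps1")
    if result["success"]:
        api.autograde("ps1", "foo")
        api.generate_feedback("ps1", "foo")
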
diff --git a/nbgrader/tests/apps/test_config.py b/nbgrader/tests/apps/test_config.py
deleted file mode 100644
index 4f2e8a9a7..000000000
--- a/nbgrader/tests/apps/test_config.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from traitlets.config import Config
-from ...coursedir import CourseDirectory
-from .base import BaseTestApp
-from .conftest import notwindows
-
-class TestCourseDirectory(BaseTestApp):
-
-    @notwindows
-    def test_format_path(self):
-        config = Config()
-        config.Exchange.course_id = "abc101"
-        config.CourseDirectory.root = "/root"
-        coursedir = CourseDirectory(config=config)
-        # Test the support for both absolute paths and paths relative to the root directory
-        # See #1222
-        assert coursedir.format_path("submitted", "alice", "HW1") == "/root/submitted/alice/HW1"
-        assert coursedir.format_path("/bar/submitted", "alice", "HW1") == "/bar/submitted/alice/HW1"
diff --git a/nbgrader/tests/apps/test_nbgrader.py b/nbgrader/tests/apps/test_nbgrader.py
deleted file mode 100644
index ca3fc503d..000000000
--- a/nbgrader/tests/apps/test_nbgrader.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-import sys
-
-from .. import run_nbgrader, run_command
-from .base import BaseTestApp
-
-
-class TestNbGrader(BaseTestApp):
-
-    def test_help(self):
-        """Does the help display without error?"""
-        run_nbgrader(["--help-all"])
-
-    def test_no_subapp(self):
-        """Is the help displayed when no subapp is given?"""
-        run_nbgrader([], retcode=0)
-
-    def test_check_version(self, capfd):
-        """Is the version the same regardless of how we run nbgrader?"""
-        out1 = '\n'.join(
-            run_command([sys.executable, "-m", "nbgrader", "--version"]).splitlines()[-3:]
-        ).strip()
-        out2 = '\n'.join(
-            run_nbgrader(["--version"], stdout=True).splitlines()[-3:]
-        ).strip()
-        assert out1 == out2
-
-    def test_logfile(self):
-        # by default, there should be no logfile created
-        cwd = os.getcwd()
-        files_before = set(os.listdir(cwd))
-        run_nbgrader([])
-        files_after = set(os.listdir(cwd))
-        assert files_before == files_after
-
-        # if we specify a logfile, it should get used
-        run_nbgrader(["--NbGrader.logfile=log.txt"])
-        assert os.path.exists("log.txt")
diff --git a/nbgrader/tests/apps/test_nbgrader_autograde.py b/nbgrader/tests/apps/test_nbgrader_autograde.py
deleted file mode 100644
index be4e04838..000000000
--- a/nbgrader/tests/apps/test_nbgrader_autograde.py
+++ /dev/null
@@ -1,1107 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import io
-import os
-import sys
-import json
-import pytest
-
-from os.path import join
-from textwrap import dedent
-from nbformat import current_nbformat
-
-from ...api import Gradebook, MissingEntry
-from ...utils import remove
-from ...nbgraderformat import reads
-from .. import run_nbgrader
-from .base import BaseTestApp
-
-
-class TestNbGraderAutograde(BaseTestApp):
-
-    def test_help(self):
-        """Does the help display without error?"""
-        run_nbgrader(["autograde", "--help-all"])
-
-    def test_missing_student(self, db, course_dir):
-        """Is a missing student automatically created?"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "baz", "ps1", "p1.ipynb"))
-
-        # If we explicitly disable creating students, autograde should fail
-        run_nbgrader(["autograde", "ps1", "--db", db, "--Autograde.create_student=False"], retcode=1)
-
-        # The default is now to create missing students (formerly --create)
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-    def test_missing_assignment(self, db, course_dir):
-        """Is an error thrown when the assignment is missing?"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "ps2", "foo", "p1.ipynb"))
-        run_nbgrader(["autograde", "ps2", "--db", db], retcode=1)
-
-    def test_grade(self, db, course_dir):
-        """Can files be graded?"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "p1.ipynb"))
-        assert not os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "timestamp.txt"))
-
-        with Gradebook(db) as gb:
-            notebook = gb.find_submission_notebook("p1", "ps1", "foo")
-            assert notebook.score == 1
-            assert notebook.max_score == 7
-            assert notebook.needs_manual_grade == False
-
-            comment1 = gb.find_comment("set_a", "p1", "ps1", "foo")
-            comment2 = gb.find_comment("baz", "p1", "ps1", "foo")
-            comment3 = gb.find_comment("quux", "p1", "ps1", "foo")
-            assert comment1.comment == "No response."
-            assert comment2.comment == "No response."
-            assert comment3.comment == "No response."
-
-            notebook = gb.find_submission_notebook("p1", "ps1", "bar")
-            assert notebook.score == 2
-            assert notebook.max_score == 7
-            assert notebook.needs_manual_grade == True
-
-            comment1 = gb.find_comment("set_a", "p1", "ps1", "bar")
-            comment2 = gb.find_comment("baz", "p1", "ps1", "bar")
-            comment3 = gb.find_comment("quux", "p1", "ps1", "bar")
-            assert comment1.comment == None
-            assert comment2.comment == None
-            assert comment3.comment == None
-
-    def test_showtraceback_exploit(self, db, course_dir):
-        """Can students exploit showtraceback to hide errors from all future cell outputs to receive free points for incorrect cells?"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-        run_nbgrader(["db", "student", "add", "spam", "--db", db])
-        run_nbgrader(["db", "student", "add", "eggs", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        # This exploit previously caused cell executions to hang indefinitely.
-        # See: https://github.com/ipython/ipython/commit/fd34cf5
-        with open("nbgrader_config.py", "a") as fh:
-            fh.write("""c.Execute.timeout = None""")
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
-        self._copy_file(join("files", "submitted-cheat-attempt.ipynb"), join(course_dir, "submitted", "spam", "ps1", "p1.ipynb"))
-        self._copy_file(join("files", "submitted-cheat-attempt-alternative.ipynb"), join(course_dir, "submitted", "eggs", "ps1", "p1.ipynb"))
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "p1.ipynb"))
-        assert not os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "timestamp.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "spam", "ps1", "p1.ipynb"))
-        assert not os.path.isfile(join(course_dir, "autograded", "spam", "ps1", "timestamp.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "eggs", "ps1", "p1.ipynb"))
-        assert not os.path.isfile(join(course_dir, "autograded", "eggs", "ps1", "timestamp.txt"))
-
-        with Gradebook(db) as gb:
-            notebook = gb.find_submission_notebook("p1", "ps1", "foo")
-            assert notebook.score == 1
-            assert notebook.max_score == 7
-            assert notebook.needs_manual_grade == False
-
-            notebook = gb.find_submission_notebook("p1", "ps1", "bar")
-            assert notebook.score == 2
-            assert notebook.max_score == 7
-            assert notebook.needs_manual_grade == True
-
-            notebook = gb.find_submission_notebook("p1", "ps1", "spam")
-            assert notebook.score == 1
-            assert notebook.max_score == 7
-            assert notebook.needs_manual_grade == True
-
-            notebook = gb.find_submission_notebook("p1", "ps1", "eggs")
-            assert notebook.score == 1
-            assert notebook.max_score == 7
-            assert notebook.needs_manual_grade == True
-
-    def test_student_id_exclude(self, db, course_dir):
-        """Does --CourseDirectory.student_id_exclude=X exclude students?"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate",
-                      "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "baz", "ps1", "p1.ipynb"))
-        run_nbgrader(["autograde", "ps1", "--db", db, '--CourseDirectory.student_id_exclude=bar,baz'])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert not os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "p1.ipynb"))
-        assert not os.path.isfile(join(course_dir, "autograded", "baz", "ps1", "p1.ipynb"))
-
-        with Gradebook(db) as gb:
-            notebook = gb.find_submission_notebook("p1", "ps1", "foo")
-            assert notebook.score == 1
-
-            with pytest.raises(MissingEntry):
-                notebook = gb.find_submission_notebook("p1", "ps1", "bar")
-            with pytest.raises(MissingEntry):
-                notebook = gb.find_submission_notebook("p1", "ps1", "baz")
-
-    def test_grade_timestamp(self, db: str, course_dir: str) -> None:
-        """Is a timestamp correctly read in?"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate",
-                      "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 America/Los_Angeles")
-
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "bar", "ps1", "timestamp.txt"), "2015-02-01 14:58:23.948203 America/Los_Angeles")
-
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "timestamp.txt"))
-
-        with Gradebook(db) as gb:
-            submission = gb.find_submission("ps1", "foo")
-            assert submission.total_seconds_late > 0
-            submission = gb.find_submission("ps1", "bar")
-            assert submission.total_seconds_late == 0
-
-        # make sure it still works to run it a second time
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-    def test_grade_empty_timestamp(self, db: str, course_dir: str) -> None:
-        """Issue #580 - Does the autograder handle empty or invalid timestamp
-        strings"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate",
-                      "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "")
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"))
-
-        with Gradebook(db) as gb:
-            submission = gb.find_submission("ps1", "foo")
-            assert submission.total_seconds_late == 0
-
-        invalid_timestamp = "But I want to be a timestamp string :("
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "bar", "ps1", "timestamp.txt"), invalid_timestamp)
-        run_nbgrader(["autograde", "ps1", "--db", db], retcode=1)
-
-    def test_late_submission_penalty_none(self, db: str, course_dir: str) -> None:
-        """Does 'none' method do nothing?"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate",
-                      "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        # not late
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 14:58:23.948203 America/Los_Angeles")
-
-        # 1h late
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "bar", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 America/Los_Angeles")
-
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "timestamp.txt"))
-
-        with Gradebook(db) as gb:
-            # not late
-            submission = gb.find_submission("ps1", "foo")
-            nb = submission.notebooks[0]
-            assert nb.score == 1
-            assert submission.total_seconds_late == 0
-            assert nb.late_submission_penalty == None
-
-            # 1h late
-            submission = gb.find_submission("ps1", "bar")
-            nb = submission.notebooks[0]
-            assert nb.score == 2
-            assert submission.total_seconds_late > 0
-            assert nb.late_submission_penalty == None
-
-    def test_late_submission_penalty_zero(self, db: str, course_dir: str) -> None:
-        """Does 'zero' method assign notebook.score as penalty if late?"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate",
-                      "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-        with open("nbgrader_config.py", "a") as fh:
-            fh.write("""c.LateSubmissionPlugin.penalty_method = 'zero'""")
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        # not late
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 14:58:23.948203 America/Los_Angeles")
-
-        # 1h late
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "bar", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 America/Los_Angeles")
-
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "timestamp.txt"))
-
-        with Gradebook(db) as gb:
-            # not late
-            submission = gb.find_submission("ps1", "foo")
-            nb = submission.notebooks[0]
-            assert nb.score == 1
-            assert submission.total_seconds_late == 0
-            assert nb.late_submission_penalty == None
-
-            # 1h late
-            submission = gb.find_submission("ps1", "bar")
-            nb = submission.notebooks[0]
-            assert nb.score == 2
-            assert submission.total_seconds_late > 0
-            assert nb.late_submission_penalty == nb.score
-
-        # Issue 723 - check penalty is reset if timestamp changed
-        self._make_file(join(course_dir, "submitted", "bar", "ps1", "timestamp.txt"), "2015-02-02 14:58:23.948203 America/Los_Angeles")
-        run_nbgrader(["autograde", "--force", "ps1", "--db", db])
-
-        with Gradebook(db) as gb:
-            # no longer late
-            submission = gb.find_submission("ps1", "bar")
-            nb = submission.notebooks[0]
-            assert nb.score == 2
-            assert submission.total_seconds_late == 0
-            assert nb.late_submission_penalty == None
-
-    def test_late_submission_penalty_plugin(self, db: str, course_dir: str) -> None:
-        """Does plugin set 1 point per hour late penalty?"""
-
-        plugin = dedent("""
-            from __future__ import division
-            from nbgrader.plugins import BasePlugin
-
-            class Blarg(BasePlugin):
-                def late_submission_penalty(self, student_id, score, total_seconds_late):
-                    # lose 1 mark per hour late
-                    hours_late = total_seconds_late / 3600
-                    return int(hours_late)
-            """)
-
-        with open("late_plugin.py", 'w') as fh:
-            fh.write(plugin)
-
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate",
-                      "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-        with open("nbgrader_config.py", "a") as fh:
-            fh.write("""c.AssignLatePenalties.plugin_class = 'late_plugin.Blarg'""")
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        # 4h late
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 18:58:23.948203 America/Los_Angeles")
-
-        # 1h late
-        self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "bar", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 America/Los_Angeles")
-
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "bar", "ps1", "timestamp.txt"))
-
-        with Gradebook(db) as gb:
-            # 4h late
-            submission = gb.find_submission("ps1", "foo")
-            nb = submission.notebooks[0]
-            assert nb.score == 1
-            assert submission.total_seconds_late > 0
-            assert nb.late_submission_penalty == nb.score
-
-            # 1h late
-            submission = gb.find_submission("ps1", "bar")
-            nb = submission.notebooks[0]
-            assert nb.score == 2
-            assert submission.total_seconds_late > 0
-            assert nb.late_submission_penalty == 1
-
-    def test_force(self, db: str, course_dir: str) -> None:
-        """Ensure the force option works properly"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate",
-                      "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo")
-        self._make_file(join(course_dir, "source", "ps1", "data", "bar.txt"), "bar")
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "foo.txt"), "foo")
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "data", "bar.txt"), "bar")
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "blah.pyc"), "asdf")
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
-
-        # check that it skips the existing directory
-        remove(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-        run_nbgrader(["autograde", "ps1", "--db", db])
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-
-        # force overwrite the supplemental files
-        run_nbgrader(["autograde", "ps1", "--db", db, "--force"])
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-
-        # force overwrite
-        remove(join(course_dir, "source", "ps1", "foo.txt"))
-        remove(join(course_dir, "submitted", "foo", "ps1", "foo.txt"))
-        run_nbgrader(["autograde", "ps1", "--db", db, "--force"])
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
-
-    def test_force_f(self, db: str, course_dir: str) -> None:
-        """Ensure the -f flag works properly"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate",
-                      "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo")
-        self._make_file(join(course_dir, "source", "ps1", "data", "bar.txt"), "bar")
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "foo.txt"), "foo")
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "data", "bar.txt"), "bar")
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "blah.pyc"), "asdf")
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
-
-        # check that it skips the existing directory
-        remove(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-        run_nbgrader(["autograde", "ps1", "--db", db])
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-
-        # force overwrite the supplemental files
-        run_nbgrader(["autograde", "ps1", "--db", db, "-f"])
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-
-        # force overwrite
-        remove(join(course_dir, "source", "ps1", "foo.txt"))
-        remove(join(course_dir, "submitted", "foo", "ps1", "foo.txt"))
-        run_nbgrader(["autograde", "ps1", "--db", db, "-f"])
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
-
-    def test_filter_notebook(self, db: str, course_dir: str) -> None:
-        """Does autograding filter by notebook properly?"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate",
-                      "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo")
-        self._make_file(join(course_dir, "source", "ps1", "data", "bar.txt"), "bar")
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "foo.txt"), "foo")
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "data", "bar.txt"), "bar")
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "blah.pyc"), "asdf")
-        run_nbgrader(["autograde", "ps1", "--db", db, "--notebook", "p1"])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
-
-        # check that removing the notebook still causes the autograder to run
-        remove(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        remove(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-        run_nbgrader(["autograde", "ps1", "--db", db, "--notebook", "p1"])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
-
-        # check that running it again doesn't do anything
-        remove(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-        run_nbgrader(["autograde", "ps1", "--db", db, "--notebook", "p1"])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
-
-        # check that removing the notebook doesn't cause the autograder to run
-        remove(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
-        assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
-
-    def test_grade_overwrite_files(self, db: str, course_dir: str) -> None:
-        """Are dependent files properly linked and overwritten?"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate",
-                      "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-        with open("nbgrader_config.py", "a") as fh:
-            fh.write("""c.Autograde.exclude_overwriting = {"ps1": ["helper.py"]}\n""")
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "source", "ps1", "data.csv"), "some,data\n")
-        self._make_file(join(course_dir, "source", "ps1", "helper.py"), "print('hello!')\n")
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 America/Los_Angeles")
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "data.csv"), "some,other,data\n")
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "helper.py"), "print('this is different!')\n")
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data.csv"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "helper.py"))
-
-        with open(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"), "r") as fh:
-            contents = fh.read()
-        assert contents == "2015-02-02 15:58:23.948203 America/Los_Angeles"
-
-        with open(join(course_dir, "autograded", "foo", "ps1", "data.csv"), "r") as fh:
-            contents = fh.read()
-        assert contents == "some,data\n"
-
-        with open(join(course_dir, "autograded", "foo", "ps1", "helper.py"), "r") as fh:
-            contents = fh.read()
-        assert contents == "print('this is different!')\n"
-
-    def test_grade_overwrite_files_subdirs(self, db: str, course_dir: str) -> None:
-        """Are dependent files properly linked and overwritten?"""
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate",
-                      "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-        with open("nbgrader_config.py", "a") as fh:
-            fh.write("""c.Autograde.exclude_overwriting = {{"ps1": ["{}"]}}\n""".format(os.path.join("subdir", "helper.py")))
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "source", "ps1", "subdir", "data.csv"), "some,data\n")
-        self._make_file(join(course_dir, "source", "ps1", "subdir", "helper.py"), "print('hello!')\n")
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 America/Los_Angeles")
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "subdir", "data.csv"), "some,other,data\n")
-        self._make_file(join(course_dir, "submitted", "foo", "ps1", "subdir", "helper.py"), "print('this is different!')\n")
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "subdir", "data.csv"))
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "subdir", "helper.py"))
-
-        with open(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"), "r") as fh:
-            contents = fh.read()
-        assert contents == "2015-02-02 15:58:23.948203 America/Los_Angeles"
-
-        with open(join(course_dir, "autograded", "foo", "ps1", "subdir", "data.csv"), "r") as fh:
-            contents = fh.read()
-        assert contents == "some,data\n"
-
-        with open(join(course_dir, "autograded", "foo", "ps1", "subdir", "helper.py"), "r") as fh:
-            contents = fh.read()
-        assert contents == "print('this is different!')\n"
-
-    def test_side_effects(self, db: str, course_dir: str) -> None:
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate",
-                      "2015-02-02 14:58:23.948203 America/Los_Angeles"])
-        run_nbgrader(["db", "student", "add", "foo", "--db", db])
-        run_nbgrader(["db", "student", "add", "bar", "--db", db])
-
-        self._copy_file(join("files", "side-effects.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
-        run_nbgrader(["generate_assignment", "ps1", "--db", db])
-
-        self._copy_file(join("files", "side-effects.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
-        run_nbgrader(["autograde", "ps1", "--db", db])
-
-        assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "side-effect.txt"))
-        assert not os.path.isfile(join(course_dir, "submitted", "foo", "ps1", "side-effect.txt"))
-
-    def test_skip_extra_notebooks(self, db, course_dir):
-        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate",
-                      "2015-02-02 14:58:23.948203 America/Los_Angeles"])
run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1 copy.ipynb")) - self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db]) - - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1 copy.ipynb")) - - @pytest.mark.parametrize("groupshared", [False, True]) - def test_permissions(self, course_dir, groupshared): - """Are permissions properly set?""" - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["db", "student", "add", "foo"]) - run_nbgrader(["db", "student", "add", "bar"]) - with open("nbgrader_config.py", "a") as fh: - if groupshared: - fh.write("""c.CourseDirectory.groupshared = True\n""") - - self._empty_notebook(join(course_dir, "source", "ps1", "foo.ipynb")) - self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo") - run_nbgrader(["generate_assignment", "ps1"]) - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "foo.ipynb")) - self._make_file(join(course_dir, "source", "foo", "ps1", "foo.txt"), "foo") - run_nbgrader(["autograde", "ps1"]) - - if not groupshared: - if sys.platform == 'win32': - perms = '444' - else: - perms = '444' - else: - if sys.platform == 'win32': - perms = '666' - dirperms = '777' - else: - perms = '664' - dirperms = '2775' - - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.ipynb")) - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt")) - if groupshared: - # non-groupshared doesn't make guarantees about directory perms - assert self._get_permissions(join(course_dir, "autograded", "foo", "ps1")) == dirperms - assert self._get_permissions(join(course_dir, "autograded", "foo", "ps1", "foo.ipynb")) == perms - assert self._get_permissions(join(course_dir, "autograded", "foo", "ps1", "foo.txt")) == perms - - def test_custom_permissions(self, course_dir): - """Are custom permissions properly set?""" - run_nbgrader(["db", "assignment", "add", "ps1", "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo"]) - run_nbgrader(["db", "student", "add", "bar"]) - - self._empty_notebook(join(course_dir, "source", "ps1", "foo.ipynb")) - self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo") - run_nbgrader(["generate_assignment", "ps1"]) - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "foo.ipynb")) - self._make_file(join(course_dir, "source", "foo", "ps1", "foo.txt"), "foo") - run_nbgrader(["autograde", "ps1", "--AutogradeApp.permissions=644"]) - - if sys.platform == 'win32': - perms = '666' - else: - perms = '644' - - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.ipynb")) - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt")) - assert self._get_permissions(join(course_dir, "autograded", "foo", "ps1", "foo.ipynb")) == perms - assert self._get_permissions(join(course_dir, "autograded", "foo", "ps1", "foo.txt")) == perms - - def test_force_single_notebook(self, course_dir): - run_nbgrader(["db", 
"assignment", "add", "ps1", "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo"]) - run_nbgrader(["db", "student", "add", "bar"]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb")) - run_nbgrader(["generate_assignment", "ps1"]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p2.ipynb")) - run_nbgrader(["autograde", "ps1", - "--ClearMetadataPreprocessor.enabled=True", - "--ClearMetadataPreprocessor.clear_notebook_metadata=False", - "--ClearMetadataPreprocessor.preserve_cell_metadata_mask=[('nbgrader')]" - ]) - - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb")) - p1 = self._file_contents(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - p2 = self._file_contents(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb")) - assert p1 == p2 - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p2.ipynb")) - run_nbgrader(["autograde", "ps1", "--notebook", "p1", "--force"]) - - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb")) - assert p1 != self._file_contents(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - assert p2 == self._file_contents(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb")) - - def test_update_newer(self, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo"]) - run_nbgrader(["db", "student", "add", "bar"]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1"]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 America/Los_Angeles") - run_nbgrader(["autograde", "ps1"]) - - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt")) - assert self._file_contents(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt")) == "2015-02-02 15:58:23.948203 America/Los_Angeles" - p = self._file_contents(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 16:58:23.948203 America/Los_Angeles") - run_nbgrader(["autograde", "ps1"]) - - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt")) - assert self._file_contents(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt")) == "2015-02-02 16:58:23.948203 America/Los_Angeles" - assert p != self._file_contents(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - - def 
test_update_newer_single_notebook(self, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo"]) - run_nbgrader(["db", "student", "add", "bar"]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb")) - run_nbgrader(["generate_assignment", "ps1"]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p2.ipynb")) - self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 America/Los_Angeles") - run_nbgrader(["autograde", "ps1", - "--ClearMetadataPreprocessor.enabled=True", - "--ClearMetadataPreprocessor.clear_notebook_metadata=False", - "--ClearMetadataPreprocessor.preserve_cell_metadata_mask=[('nbgrader')]" - ]) - - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb")) - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt")) - assert self._file_contents(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt")) == "2015-02-02 15:58:23.948203 America/Los_Angeles" - p1 = self._file_contents(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - p2 = self._file_contents(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb")) - assert p1 == p2 - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p2.ipynb")) - self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 16:58:23.948203 America/Los_Angeles") - run_nbgrader(["autograde", "ps1", "--notebook", "p1"]) - - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb")) - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt")) - assert self._file_contents(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt")) == "2015-02-02 16:58:23.948203 America/Los_Angeles" - assert p1 != self._file_contents(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - assert p2 == self._file_contents(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb")) - - def test_hidden_tests_single_notebook(self, db, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - with open("nbgrader_config.py", "a") as fh: - fh.write("""c.ClearSolutions.code_stub=dict(python="# YOUR CODE HERE")""") - - self._copy_file( - join("files", "test-hidden-tests.ipynb"), - join(course_dir, "source", "ps1", "p1.ipynb") - ) - # test-hidden-tests.ipynb contains visible solutions that pass - # visible tests, but fail on hidden tests - - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - # make sure hidden tests are removed in release - with io.open(join(course_dir, "release", "ps1", "p1.ipynb"), mode='r', encoding='utf-8') as nb: - source = nb.read() - assert "BEGIN HIDDEN TESTS" not in source - - self._copy_file( - 
join(course_dir, "release", "ps1", "p1.ipynb"), - join(course_dir, "submitted", "foo", "ps1", "p1.ipynb") - ) - - # make sure submitted validates, should only fail on hidden tests - output = run_nbgrader([ - "validate", join(course_dir, "submitted", "foo", "ps1", "p1.ipynb") - ], stdout=True) - assert output.strip() == "Success! Your notebook passes all the tests." - - run_nbgrader(["autograde", "ps1", "--db", db]) - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - - # make sure hidden tests are placed back in autograded - sub_nb = join(course_dir, "autograded", "foo", "ps1", "p1.ipynb") - with io.open(sub_nb, mode='r', encoding='utf-8') as nb: - source = nb.read() - assert "BEGIN HIDDEN TESTS" in source - - # make sure autograded does not validate, should fail on hidden tests - output = run_nbgrader([ - "validate", join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"), - ], stdout=True) - assert output.splitlines()[0] == ( - "VALIDATION FAILED ON 2 CELL(S)! If you submit your assignment " - "as it is, you WILL NOT" - ) - - with Gradebook(db) as gb: - submission = gb.find_submission("ps1", "foo") - nb1 = submission.notebooks[0] - assert nb1.score == 1.5 - - def test_handle_failure(self, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo"]) - run_nbgrader(["db", "student", "add", "bar"]) - - self._empty_notebook(join(course_dir, "source", "ps1", "p1.ipynb")) - self._empty_notebook(join(course_dir, "source", "ps1", "p2.ipynb")) - run_nbgrader(["generate_assignment", "ps1"]) - - self._empty_notebook(join(course_dir, "submitted", "bar", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p2.ipynb")) - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p2.ipynb")) - run_nbgrader(["autograde", "ps1"], retcode=1) - - assert not os.path.exists(join(course_dir, "autograded", "bar", "ps1")) - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1")) - - def test_handle_failure_single_notebook(self, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo"]) - run_nbgrader(["db", "student", "add", "bar"]) - - self._empty_notebook(join(course_dir, "source", "ps1", "p1.ipynb")) - self._empty_notebook(join(course_dir, "source", "ps1", "p2.ipynb")) - run_nbgrader(["generate_assignment", "ps1"]) - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p2.ipynb")) - run_nbgrader(["autograde", "ps1", "--notebook", "p*"], retcode=1) - - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1")) - assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb")) - - def test_missing_source_kernelspec(self, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo"]) - run_nbgrader(["db", "student", "add", "bar"]) - with open("nbgrader_config.py", "a") as fh: - fh.write("""c.ClearSolutions.code_stub = {'python': '## Answer', 'blah': '## 
Answer'}""") - - self._empty_notebook(join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1"]) - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"), kernel="python") - run_nbgrader(["autograde", "ps1"]) - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1")) - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - - self._empty_notebook(join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"), kernel="blarg") - run_nbgrader(["autograde", "ps1"], retcode=1) - assert not os.path.exists(join(course_dir, "autograded", "bar", "ps1")) - - def test_incorrect_source_kernelspec(self, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo"]) - run_nbgrader(["db", "student", "add", "bar"]) - with open("nbgrader_config.py", "a") as fh: - fh.write("""c.ClearSolutions.code_stub = {'python': '## Answer', 'blah': '## Answer'}""") - - self._empty_notebook(join(course_dir, "source", "ps1", "p1.ipynb"), kernel="blah") - run_nbgrader(["generate_assignment", "ps1"]) - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"), kernel="python") - run_nbgrader(["autograde", "ps1"], retcode=1) - assert not os.path.exists(join(course_dir, "autograded", "foo", "ps1")) - - def test_incorrect_submitted_kernelspec(self, db, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - - self._empty_notebook(join(course_dir, "source", "ps1", "p1.ipynb"), kernel="python") - run_nbgrader(["generate_assignment", "ps1"]) - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"), kernel="blah") - run_nbgrader(["autograde", "ps1"]) - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1")) - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - - def test_no_execute(self, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo"]) - run_nbgrader(["db", "student", "add", "bar"]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1"]) - - self._copy_file(join("files", "test-with-output.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - with io.open(join(os.path.dirname(__file__), "files", "test-with-output.ipynb"), mode="r", encoding='utf-8') as fh: - orig_contents = reads(fh.read(), as_version=current_nbformat) - - run_nbgrader(["autograde", "ps1"]) - with io.open(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"), mode="r", encoding="utf-8") as fh: - new_contents = reads(fh.read(), as_version=current_nbformat) - - different = False - for i in range(len(orig_contents.cells)): - orig_cell = orig_contents.cells[i] - new_cell = new_contents.cells[i] - if 'outputs' in orig_cell: - if orig_cell.outputs != new_cell.outputs: - different = True - break - elif 'outputs' in new_cell: - different = True - - assert different - - run_nbgrader(["autograde", "ps1", "--force", "--no-execute"]) - with io.open(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"), mode="r", encoding="utf-8") as fh: - new_contents = 
reads(fh.read(), as_version=current_nbformat) - - for i in range(len(orig_contents.cells)): - orig_cell = orig_contents.cells[i] - new_cell = new_contents.cells[i] - if 'outputs' in orig_cell: - assert orig_cell.outputs == new_cell.outputs - else: - assert 'outputs' not in new_cell - - def test_many_students(self, course_dir): - pytest.skip("this test takes too long to run and requires manual configuration") - - # NOTE: to test this, you will manually have to configure the postgres - # database. In the postgresql.conf file in the postgres data directory, - # set max_connections to something low (like 5). Then, create the gradebook - # database and run this test. - db = "postgresql://localhost:5432/gradebook" - - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - student_fmt = "student{:03d}" - num_students = 50 - for i in range(num_students): - run_nbgrader(["db", "student", "add", student_fmt.format(i), "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - for i in range(num_students): - self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", student_fmt.format(i), "ps1", "p1.ipynb")) - - run_nbgrader(["autograde", "ps1", "--db", db]) - - def test_infinite_loop(self, db, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - with open("nbgrader_config.py", "a") as fh: - fh.write("""c.Execute.timeout = 1""") - - self._copy_file(join("files", "infinite-loop.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - self._copy_file(join("files", "infinite-loop.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db]) - - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - - def test_infinite_loop_with_output(self, db, course_dir): - pytest.skip("this test takes too long to run and consumes a LOT of memory") - - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - - self._copy_file(join("files", "infinite-loop-with-output.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - self._copy_file(join("files", "infinite-loop-with-output.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db], retcode=1) - - assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - - def test_missing_files(self, db, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - - self._empty_notebook(join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1"]) - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - os.makedirs(join(course_dir, "submitted", "bar", "ps1")) - 
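# At this point "bar" has a submission directory but no notebooks in - # it; the autograde run below should grade "foo" normally and skip - # "bar" entirely, which the assertions that follow verify. - 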
run_nbgrader(["autograde", "ps1"]) - - assert os.path.exists(join(course_dir, "autograded", "foo", "ps1")) - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - assert not os.path.exists(join(course_dir, "autograded", "bar")) - - def test_grade_missing_notebook(self, db, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db]) - - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb")) - - with Gradebook(db) as gb: - submission = gb.find_submission("ps1", "foo") - nb1, nb2 = submission.notebooks - assert not nb2.needs_manual_grade - assert nb2.score == 0 - - def test_grade_with_validating_envvar(self, db, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - - self._copy_file(join("files", "validating-environment-variable.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - self._copy_file(join("files", "validating-environment-variable.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db]) - - assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - - with Gradebook(db) as gb: - submission = gb.find_submission("ps1", "foo") - nb1, = submission.notebooks - assert nb1.score == 0 - - def test_autograde_timeout(self, db, course_dir): - """Does autograde accept timeout configuration correctly?""" - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - - self._copy_file(join("files", "timeout.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - self._copy_file(join("files", "timeout.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._copy_file(join("files", "timeout.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb")) - - output = run_nbgrader(["autograde", "ps1", "--db", db, "--student", "foo"]) - # timeout=2 secs, 1 was causing an asyncio error on Windows - output = run_nbgrader(["autograde", "ps1", "--db", db, "--student", "bar", "--Execute.timeout=2"]) - - # Check timeout config changes function based on timeout config - with Gradebook(db) as gb: - notebook = gb.find_submission_notebook("p1", "ps1", "foo") - assert notebook.score == 1 - notebook = gb.find_submission_notebook("p1", "ps1", "bar") - assert notebook.score == 0 diff --git a/nbgrader/tests/apps/test_nbgrader_collect.py b/nbgrader/tests/apps/test_nbgrader_collect.py 
deleted file mode 100644 index 8e39db7ee..000000000 --- a/nbgrader/tests/apps/test_nbgrader_collect.py +++ /dev/null @@ -1,197 +0,0 @@ -import datetime -import os -import time -import pytest - -from os.path import join - -from .. import run_nbgrader -from .base import BaseTestApp -from .conftest import notwindows -from ...api import Gradebook -from ...utils import parse_utc, get_username - - -@notwindows -class TestNbGraderCollect(BaseTestApp): - - def _release_and_fetch(self, assignment, exchange, course_dir): - self._copy_file(os.path.join("files", "test.ipynb"), os.path.join(course_dir, "release", "ps1", "p1.ipynb")) - run_nbgrader([ - "release_assignment", assignment, - "--course", "abc101", - "--Exchange.root={}".format(exchange) - ]) - run_nbgrader([ - "fetch_assignment", assignment, - "--course", "abc101", - "--Exchange.root={}".format(exchange) - ]) - - def _submit(self, assignment, exchange, cache, flags=None): - cmd = [ - "submit", assignment, - "--course", "abc101", - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ] - - if flags is not None: - cmd.extend(flags) - run_nbgrader(cmd) - - def _collect(self, assignment, exchange, flags=None, retcode=0): - cmd = [ - "collect", assignment, - "--course", "abc101", - "--Exchange.root={}".format(exchange) - ] - - if flags is not None: - cmd.extend(flags) - - return run_nbgrader(cmd, retcode=retcode) - - def _read_timestamp(self, root): - with open(os.path.join(root, "timestamp.txt"), "r") as fh: - timestamp = parse_utc(fh.read()) - return timestamp - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["collect", "--help-all"]) - - def test_no_course_id(self, exchange, course_dir, cache): - """Does collecting without a course id throw an error?""" - self._release_and_fetch("ps1", exchange, course_dir) - self._submit("ps1", exchange, cache) - cmd = [ - "collect", "ps1", - "--Exchange.root={}".format(exchange) - ] - run_nbgrader(cmd, retcode=1) - - def test_collect(self, exchange, course_dir, cache): - self._release_and_fetch("ps1", exchange, course_dir) - - # try to collect when there's nothing to collect - self._collect("ps1", exchange) - root = join(course_dir, "submitted", get_username(), "ps1") - assert not os.path.isdir(os.path.join(course_dir, "submitted")) - - # submit something - self._submit("ps1", exchange, cache) - time.sleep(1) - - # try to collect it - self._collect("ps1", exchange) - assert os.path.isfile(os.path.join(root, "p1.ipynb")) - assert os.path.isfile(os.path.join(root, "timestamp.txt")) - timestamp = self._read_timestamp(root) - - # try to collect it again - self._collect("ps1", exchange) - assert self._read_timestamp(root) == timestamp - - # submit again - self._submit("ps1", exchange, cache) - - # collect again - self._collect("ps1", exchange) - assert self._read_timestamp(root) == timestamp - - # collect again with --update - self._collect("ps1", exchange, ["--update"]) - assert self._read_timestamp(root) != timestamp - - def test_collect_assignment_flag(self, exchange, course_dir, cache): - self._release_and_fetch("ps1", exchange, course_dir) - self._submit("ps1", exchange, cache) - - # collect using the --assignment flag rather than a positional argument - self._collect("--assignment=ps1", exchange) - root = join(course_dir, "submitted", get_username(), "ps1") - assert os.path.isfile(os.path.join(root, "p1.ipynb")) - assert os.path.isfile(os.path.join(root, 
"timestamp.txt")) - - def test_collect_subdirectories(self, exchange, course_dir, cache): - self._release_and_fetch("ps1", exchange, course_dir) - - # create a subdirectory with an empty file - os.makedirs(os.path.join('ps1', 'foo')) - with open(os.path.join('ps1', 'foo', 'temp.txt'), 'w') as fh: - fh.write("") - - self._submit("ps1", exchange, cache) - - # make sure collect succeeds - self._collect("ps1", exchange) - - def test_owner_check(self, exchange, course_dir, cache): - self._release_and_fetch("ps1", exchange, course_dir) - self._submit("ps1", exchange, cache, flags=["--student=foobar_student",]) - - # By default, a warning is raised if the student id does not match the directory owner - out = self._collect("--assignment=ps1", exchange) - assert 'WARNING' in out - - # This warning can be disabled - out = self._collect("--assignment=ps1", exchange, flags=["--ExchangeCollect.check_owner=False"]) - assert 'WARNING' not in out - - @notwindows - @pytest.mark.parametrize("groupshared", [False, True]) - def test_permissions(self, exchange, course_dir, cache, groupshared): - if groupshared: - with open("nbgrader_config.py", "a") as fh: - fh.write("""c.CourseDirectory.groupshared = True\n""") - self._release_and_fetch("ps1", exchange, course_dir) - self._submit("ps1", exchange, cache, flags=["--student=foobar_student",]) - - # By default, a warning is raised if the student id does not match the directory owner - self._collect("--assignment=ps1", exchange) - assert self._get_permissions(join(exchange, "abc101", "inbound")) == ("2733" if not groupshared else "2773") - assert self._get_permissions(join(course_dir, "submitted", "foobar_student", "ps1")) == ("777" if not groupshared else "2777") - assert self._get_permissions(join(course_dir, "submitted", "foobar_student", "ps1", "p1.ipynb")) == ("644" if not groupshared else "664") - - @pytest.mark.parametrize('before_duedate', - ['yes', 'no', 'nofirst']) - def test_collect_before_due_date(self, exchange, course_dir, cache, db, before_duedate): - """Test --before-duedate flag. - - Test is parameterized so we test both with it and without the flag. - - 'yes': test with --before-duedate - 'no': test without - 'nofirst': test with --before-duedate but no assignment before duedate - - """ - # Release assignment - self._release_and_fetch("ps1", exchange, course_dir) - - # Submit something, wait, submit again. Due date is between. - if before_duedate != 'nofirst': - # We don't submit first assignment. 
- self._submit("ps1", exchange, cache) - time.sleep(.05) - time_duedate = datetime.datetime.utcnow() - time.sleep(.05) - self._submit("ps1", exchange, cache) - - # Set the due date - with Gradebook(db) as gb: - gb.update_or_create_assignment('ps1', duedate=time_duedate) - - # Collect - flags = ['--db', db] - if before_duedate != 'no': - flags.append('--before-duedate') - self._collect("ps1", exchange, flags=flags) - - root = os.path.os.path.join(os.path.join(course_dir, "submitted", get_username(), "ps1")) - timestamp = self._read_timestamp(root) - # Test both ways: with --before-duedate flag and without - if before_duedate == 'yes': - assert timestamp < time_duedate - else: # 'no', 'nofirst' - assert timestamp > time_duedate diff --git a/nbgrader/tests/apps/test_nbgrader_db.py b/nbgrader/tests/apps/test_nbgrader_db.py deleted file mode 100644 index 676df1ddf..000000000 --- a/nbgrader/tests/apps/test_nbgrader_db.py +++ /dev/null @@ -1,423 +0,0 @@ -import pytest -import datetime -import shutil -import os - -from textwrap import dedent -from os.path import join - -from ...api import Gradebook, MissingEntry -from .. import run_nbgrader -from .base import BaseTestApp - - -class TestNbGraderDb(BaseTestApp): - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["db", "--help-all"]) - run_nbgrader(["db", "student", "--help-all"]) - run_nbgrader(["db", "student", "list", "--help-all"]) - run_nbgrader(["db", "student", "remove", "--help-all"]) - run_nbgrader(["db", "student", "add", "--help-all"]) - run_nbgrader(["db", "student", "import", "--help-all"]) - run_nbgrader(["db", "assignment", "--help-all"]) - run_nbgrader(["db", "assignment", "list", "--help-all"]) - run_nbgrader(["db", "assignment", "remove", "--help-all"]) - run_nbgrader(["db", "assignment", "add", "--help-all"]) - run_nbgrader(["db", "assignment", "import", "--help-all"]) - - def test_no_args(self): - """Is there an error if no arguments are given?""" - run_nbgrader(["db"], retcode=0) - run_nbgrader(["db", "student"], retcode=0) - run_nbgrader(["db", "student", "remove"], retcode=1) - run_nbgrader(["db", "student", "add"], retcode=1) - run_nbgrader(["db", "student", "import"], retcode=1) - run_nbgrader(["db", "assignment"], retcode=0) - run_nbgrader(["db", "assignment", "remove"], retcode=1) - run_nbgrader(["db", "assignment", "add"], retcode=1) - run_nbgrader(["db", "assignment", "import"], retcode=1) - - def test_student_add(self, db): - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - with Gradebook(db) as gb: - student = gb.find_student("foo") - assert student.last_name is None - assert student.first_name is None - assert student.email is None - - run_nbgrader(["db", "student", "add", "foo", "--last-name=FooBar", "--db", db]) - with Gradebook(db) as gb: - student = gb.find_student("foo") - assert student.last_name == "FooBar" - assert student.first_name is None - assert student.email is None - - run_nbgrader(["db", "student", "add", "foo", "--first-name=FooBar", "--db", db]) - with Gradebook(db) as gb: - student = gb.find_student("foo") - assert student.last_name is None - assert student.first_name == "FooBar" - assert student.email is None - - run_nbgrader(["db", "student", "add", "foo", "--email=foo@bar.com", "--db", db]) - with Gradebook(db) as gb: - student = gb.find_student("foo") - assert student.last_name is None - assert student.first_name is None - assert student.email == "foo@bar.com" - - def test_student_remove(self, db): - run_nbgrader(["db", "student", "add", "foo", 
"--db", db]) - with Gradebook(db) as gb: - student = gb.find_student("foo") - assert student.last_name is None - assert student.first_name is None - assert student.email is None - - run_nbgrader(["db", "student", "remove", "foo", "--db", db]) - with Gradebook(db) as gb: - with pytest.raises(MissingEntry): - gb.find_student("foo") - - # running it again should give an error - run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1) - - def test_student_remove_with_submissions(self, db, course_dir): - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db]) - - with Gradebook(db) as gb: - gb.find_student("foo") - - # it should fail if we don't run with --force - run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1) - - # make sure we can still find the student - with Gradebook(db) as gb: - gb.find_student("foo") - - # now force it to complete - run_nbgrader(["db", "student", "remove", "foo", "--force", "--db", db]) - - # student should be gone - with Gradebook(db) as gb: - with pytest.raises(MissingEntry): - gb.find_student("foo") - - def test_student_remove_with_submissions_f(self, db, course_dir): - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db]) - - with Gradebook(db) as gb: - gb.find_student("foo") - - # it should fail if we don't run with --force - run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1) - - # make sure we can still find the student - with Gradebook(db) as gb: - gb.find_student("foo") - - # now force it to complete - run_nbgrader(["db", "student", "remove", "foo", "-f", "--db", db]) - - # student should be gone - with Gradebook(db) as gb: - with pytest.raises(MissingEntry): - gb.find_student("foo") - - def test_student_list(self, db): - run_nbgrader(["db", "student", "add", "foo", "--first-name=abc", "--last-name=xyz", "--email=foo@bar.com", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - out = run_nbgrader(["db", "student", "list", "--db", db], stdout=True) - assert out == dedent( - """ - There are 2 students in the database: - bar (None, None) -- None, None - foo (xyz, abc) -- foo@bar.com, None - """ - ).strip() + "\n" - - def test_student_import(self, db, temp_cwd): - with open("students.csv", "w") as fh: - fh.write(dedent( - """ - id,first_name,last_name,email - foo,abc,xyz,foo@bar.com - bar,,, - """ - ).strip()) - - run_nbgrader(["db", "student", "import", "students.csv", "--db", db]) - with Gradebook(db) as gb: - student = gb.find_student("foo") - assert student.last_name == "xyz" - assert student.first_name == "abc" - assert student.email == "foo@bar.com" - student = gb.find_student("bar") - assert student.last_name is None - assert student.first_name is None - assert student.email is None - - # 
check that it fails when no id column is given - with open("students.csv", "w") as fh: - fh.write(dedent( - """ - first_name,last_name,email - abc,xyz,foo@bar.com - ,, - """ - ).strip()) - - run_nbgrader(["db", "student", "import", "students.csv", "--db", db], retcode=1) - - # check that it works ok with extra and missing columns - with open("students.csv", "w") as fh: - fh.write(dedent( - """ - id,first_name,last_name,foo - foo,abc,xyzzzz,blah - bar,,, - """ - ).strip()) - - run_nbgrader(["db", "student", "import", "students.csv", "--db", db]) - with Gradebook(db) as gb: - student = gb.find_student("foo") - assert student.last_name == "xyzzzz" - assert student.first_name == "abc" - assert student.email == "foo@bar.com" - student = gb.find_student("bar") - assert student.last_name is None - assert student.first_name is None - assert student.email is None - - - def test_student_import_csv_spaces(self, db, temp_cwd): - with open("students.csv", "w") as fh: - fh.write(dedent( - """ - id,first_name,last_name, email - foo,abc,xyz,foo@bar.com - bar,,, - """ - ).strip()) - - run_nbgrader(["db", "student", "import", "students.csv", "--db", db]) - with Gradebook(db) as gb: - student = gb.find_student("foo") - assert student.last_name == "xyz" - assert student.first_name == "abc" - assert student.email == "foo@bar.com" - student = gb.find_student("bar") - assert student.last_name is None - assert student.first_name is None - assert student.email is None - - def test_assignment_add(self, db): - run_nbgrader(["db", "assignment", "add", "foo", "--db", db]) - with Gradebook(db) as gb: - assignment = gb.find_assignment("foo") - assert assignment.duedate is None - - run_nbgrader(["db", "assignment", "add", "foo", '--duedate="Sun Jan 8 2017 4:31:22 PM"', "--db", db]) - with Gradebook(db) as gb: - assignment = gb.find_assignment("foo") - assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22) - - def test_assignment_remove(self, db): - run_nbgrader(["db", "assignment", "add", "foo", "--db", db]) - with Gradebook(db) as gb: - assignment = gb.find_assignment("foo") - assert assignment.duedate is None - - run_nbgrader(["db", "assignment", "remove", "foo", "--db", db]) - with Gradebook(db) as gb: - with pytest.raises(MissingEntry): - gb.find_assignment("foo") - - # running it again should give an error - run_nbgrader(["db", "assignment", "remove", "foo", "--db", db], retcode=1) - - def test_assignment_remove_with_submissions(self, db, course_dir): - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db]) - - with Gradebook(db) as gb: - gb.find_assignment("ps1") - - # it should fail if we don't run with --force - run_nbgrader(["db", "assignment", "remove", "ps1", "--db", db], retcode=1) - - # make sure we can still find the assignment - with Gradebook(db) as gb: - gb.find_assignment("ps1") - - # now force it to complete - run_nbgrader(["db", "assignment", "remove", "ps1", "--force", "--db", db]) - - # assignment should be gone - with Gradebook(db) as gb: - with pytest.raises(MissingEntry): - gb.find_assignment("ps1") - - def test_assignment_remove_with_submissions_f(self, db, course_dir): - 
run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db]) - - with Gradebook(db) as gb: - gb.find_assignment("ps1") - - # it should fail if we don't run with --force - run_nbgrader(["db", "assignment", "remove", "ps1", "--db", db], retcode=1) - - # make sure we can still find the assignment - with Gradebook(db) as gb: - gb.find_assignment("ps1") - - # now force it to complete - run_nbgrader(["db", "assignment", "remove", "ps1", "-f", "--db", db]) - - # assignment should be gone - with Gradebook(db) as gb: - with pytest.raises(MissingEntry): - gb.find_assignment("ps1") - - def test_assignment_list(self, db): - run_nbgrader(["db", "assignment", "add", "foo", '--duedate="Sun Jan 8 2017 4:31:22 PM"', "--db", db]) - run_nbgrader(["db", "assignment", "add", "bar", "--db", db]) - out = run_nbgrader(["db", "assignment", "list", "--db", db], stdout=True) - assert out == dedent( - """ - There are 2 assignments in the database: - bar (due: None) - foo (due: 2017-01-08 16:31:22) - """ - ).strip() + "\n" - - def test_assignment_import(self, db, temp_cwd): - with open("assignments.csv", "w") as fh: - fh.write(dedent( - """ - name,duedate - foo,Sun Jan 8 2017 4:31:22 PM - bar, - """ - ).strip()) - - run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db]) - with Gradebook(db) as gb: - assignment = gb.find_assignment("foo") - assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22) - assignment = gb.find_assignment("bar") - assert assignment.duedate is None - - - def test_assignment_import_csv_spaces(self, db, temp_cwd): - with open("assignments.csv", "w") as fh: - fh.write(dedent( - """ - name, duedate - foo,Sun Jan 8 2017 4:31:22 PM - bar, - """ - ).strip()) - - run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db]) - with Gradebook(db) as gb: - assignment = gb.find_assignment("foo") - assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22) - assignment = gb.find_assignment("bar") - assert assignment.duedate is None - - # check that it fails when no id column is given - with open("assignments.csv", "w") as fh: - fh.write(dedent( - """ - duedate - Sun Jan 8 2017 4:31:22 PM - , - """ - ).strip()) - - run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db], retcode=1) - - # check that it works ok with extra and missing columns - with open("assignments.csv", "w") as fh: - fh.write(dedent( - """ - name - foo - bar - """ - ).strip()) - - run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db]) - with Gradebook(db) as gb: - assignment = gb.find_assignment("foo") - assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22) - assignment = gb.find_assignment("bar") - assert assignment.duedate is None - def test_upgrade_nodb(self, temp_cwd): - # test upgrading without a database - run_nbgrader(["db", "upgrade"]) - - def test_upgrade_current_db(self, course_dir): - # add assignment files - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb")) - - # check that nbgrader 
generate_assignment passes - run_nbgrader(["generate_assignment", "ps1"]) - - # test upgrading with a current database - run_nbgrader(["db", "upgrade"]) - - def test_upgrade_old_db_no_assign(self, course_dir): - # add assignment files - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb")) - - # replace the gradebook with an old version - self._copy_file(join("files", "gradebook.db"), join(course_dir, "gradebook.db")) - - # upgrade the database - run_nbgrader(["db", "upgrade"]) - - # check that nbgrader assign passes - run_nbgrader(["assign", "ps1"]) - - def test_upgrade_old_db(self, course_dir): - # add assignment files - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb")) - - # replace the gradebook with an old version - self._copy_file(join("files", "gradebook.db"), join(course_dir, "gradebook.db")) - - # check that nbgrader generate_assignment fails - run_nbgrader(["generate_assignment", "ps1"], retcode=1) - - # upgrade the database - run_nbgrader(["db", "upgrade"]) - - # check that nbgrader generate_assignment passes - run_nbgrader(["generate_assignment", "ps1"]) diff --git a/nbgrader/tests/apps/test_nbgrader_export.py b/nbgrader/tests/apps/test_nbgrader_export.py deleted file mode 100644 index 984d8a2d9..000000000 --- a/nbgrader/tests/apps/test_nbgrader_export.py +++ /dev/null @@ -1,65 +0,0 @@ -import os - -from os.path import join -from ...utils import remove -from .. import run_nbgrader -from .base import BaseTestApp - - -class TestNbGraderExport(BaseTestApp): - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["export", "--help-all"]) - - def test_export(self, db, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "assignment", "add", "ps2", "--db", db, "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps2", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - run_nbgrader(["generate_assignment", "ps2", "--db", db]) - - self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb")) - self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps2", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db]) - run_nbgrader(["autograde", "ps2", "--db", db]) - - run_nbgrader(["export", "--db", db]) - assert os.path.isfile("grades.csv") - with open("grades.csv", "r") as fh: - contents = fh.readlines() - assert len(contents) == 5 - - run_nbgrader(["export", "--db", db, "--to", "mygrades.csv"]) - assert os.path.isfile("mygrades.csv") - - remove("grades.csv") - run_nbgrader(["export", "--db", db, "--exporter", "nbgrader.plugins.CsvExportPlugin"]) - assert os.path.isfile("grades.csv") - - run_nbgrader(["export", "--db", db, "--exporter=nbgrader.tests.apps.files.myexporter.MyExporter", "--to", "foo.txt"]) - assert os.path.isfile("foo.txt") - - 
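# The line counts in the filtered exports below follow from grades.csv - # having one header row plus one row per (student, assignment) pair, - # hence 2 students x 2 assignments + 1 = 5 above: filtering to one - # student or one assignment leaves 2 + 1 = 3 lines, and filtering on - # both leaves 1 + 1 = 2. - 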
run_nbgrader(["export", "--db", db, "--student", "['bar']"]) - assert os.path.isfile("grades.csv") - with open("grades.csv", "r") as fh: - contents = fh.readlines() - assert len(contents) == 3 - - run_nbgrader(["export", "--db", db, "--assignment", "['ps1']"]) - assert os.path.isfile("grades.csv") - with open("grades.csv", "r") as fh: - contents = fh.readlines() - assert len(contents) == 3 - - run_nbgrader(["export", "--db", db, "--assignment", "['ps1']", "--student", "['foo']"]) - assert os.path.isfile("grades.csv") - with open("grades.csv", "r") as fh: - contents = fh.readlines() - assert len(contents) == 2 diff --git a/nbgrader/tests/apps/test_nbgrader_fetch_assignment.py b/nbgrader/tests/apps/test_nbgrader_fetch_assignment.py deleted file mode 100644 index e0e34aad5..000000000 --- a/nbgrader/tests/apps/test_nbgrader_fetch_assignment.py +++ /dev/null @@ -1,115 +0,0 @@ -# -*- coding: utf-8 -*- - -import io -import os -from os.path import join - -from .. import run_nbgrader -from .base import BaseTestApp -from .conftest import notwindows - - -@notwindows -class TestNbGraderFetch(BaseTestApp): - - def _release(self, assignment, exchange, course_dir, course="abc101"): - self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", assignment, "p1.ipynb")) - run_nbgrader([ - "release_assignment", assignment, - "--course", course, - "--Exchange.root={}".format(exchange) - ]) - - def _fetch(self, assignment, exchange, flags=None, retcode=0, course="abc101"): - cmd = [ - "fetch_assignment", assignment, - "--course", course, - "--Exchange.root={}".format(exchange) - ] - - if flags is not None: - cmd.extend(flags) - - run_nbgrader(cmd, retcode=retcode) - - def _fetch_multi(self, assignments, exchange, flags=None, retcode=0, course="abc101"): - cmd = [ - "fetch_assignment", - "--course", course, - "--Exchange.root={}".format(exchange) - ] - cmd.extend(assignments) - - if flags is not None: - cmd.extend(flags) - - run_nbgrader(cmd, retcode=retcode) - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["fetch_assignment", "--help-all"]) - - def test_no_course_id(self, exchange, course_dir): - """Does releasing without a course id thrown an error?""" - self._release("ps1", exchange, course_dir) - cmd = [ - "fetch_assignment", "ps1", - "--Exchange.root={}".format(exchange) - ] - run_nbgrader(cmd, retcode=1) - - def test_fetch(self, exchange, course_dir): - self._release("ps1", exchange, course_dir) - self._fetch("ps1", exchange) - assert os.path.isfile(join("ps1", "p1.ipynb")) - - # make sure it fails if the assignment already exists - self._fetch("ps1", exchange, retcode=1) - - # make sure it fails even if the assignment is incomplete - os.remove(join("ps1", "p1.ipynb")) - self._fetch("ps1", exchange, retcode=1) - - # make sure it passes if the --replace flag is given - self._fetch("ps1", exchange, flags=["--replace"]) - assert os.path.isfile(join("ps1", "p1.ipynb")) - - # make sure the --replace flag doesn't overwrite files, though - self._copy_file(join("files", "submitted-changed.ipynb"), join("ps1", "p1.ipynb")) - with io.open(join("ps1", "p1.ipynb"), mode="r", encoding='utf-8') as fh: - contents1 = fh.read() - self._fetch("ps1", exchange, flags=["--replace"]) - with io.open(join("ps1", "p1.ipynb"), mode="r", encoding='utf-8') as fh: - contents2 = fh.read() - assert contents1 == contents2 - - def test_deprecated(self, exchange, course_dir): - self._release("ps1", exchange, course_dir) - run_nbgrader([ - "fetch", "ps1", - "--course", "abc101", - 
"--Exchange.root={}".format(exchange) - ]) - assert os.path.isfile(join("ps1", "p1.ipynb")) - - def test_fetch_with_assignment_flag(self, exchange, course_dir): - self._release("ps1", exchange, course_dir) - self._fetch("--assignment=ps1", exchange) - assert os.path.isfile(join("ps1", "p1.ipynb")) - - def test_fetch_multiple_courses(self, exchange, course_dir): - self._release("ps1", exchange, course_dir, course="abc101") - self._fetch("ps1", exchange, course="abc101", flags=["--Exchange.path_includes_course=True"]) - assert os.path.isfile(join("abc101", "ps1", "p1.ipynb")) - - self._release("ps1", exchange, course_dir, course="abc102") - self._fetch("ps1", exchange, course="abc102", flags=["--Exchange.path_includes_course=True"]) - assert os.path.isfile(join("abc102", "ps1", "p1.ipynb")) - - def test_fetch_multiple_assignments(self, exchange, course_dir): - self._release("ps1", exchange, course_dir, course="abc101") - - self._release("ps2", exchange, course_dir, course="abc101") - self._fetch_multi(["ps1", "ps2"], exchange, course="abc101", flags=["--Exchange.path_includes_course=True"]) - assert os.path.isfile(join("abc101", "ps1", "p1.ipynb")) - assert os.path.isfile(join("abc101", "ps2", "p1.ipynb")) diff --git a/nbgrader/tests/apps/test_nbgrader_fetchfeedback.py b/nbgrader/tests/apps/test_nbgrader_fetchfeedback.py deleted file mode 100644 index e10656d8c..000000000 --- a/nbgrader/tests/apps/test_nbgrader_fetchfeedback.py +++ /dev/null @@ -1,93 +0,0 @@ -import os -import sys -from os.path import join, exists, isfile - -from ...utils import remove, notebook_hash -from .. import run_nbgrader -from .base import BaseTestApp -from .conftest import notwindows - - -class TestNbGraderFetchFeedback(BaseTestApp): - - def _generate_assignment(self, assignment, course_dir, db, course="abc101"): - run_nbgrader([ - "generate_assignment", assignment, - "--db", db - ]) - - def _release(self, assignment, exchange, cache, course_dir, course="abc101"): - run_nbgrader([ - "release_assignment", assignment, - "--course", course, - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ]) - - def _fetch(self, assignment, exchange, cache, course="abc101", flags=None): - cmd = [ - "fetch_assignment", assignment, - "--course", course, - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ] - - if flags is not None: - cmd.extend(flags) - - run_nbgrader(cmd) - - def _release_and_fetch(self, assignment, exchange, cache, course_dir, course="abc101"): - self._release(assignment, exchange, cache, course_dir, course=course) - self._fetch(assignment, exchange, cache, course=course) - - def _submit(self, assignment, exchange, cache, flags=None, retcode=0, course="abc101"): - cmd = [ - "submit", assignment, - "--course", course, - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange), - ] - - if flags is not None: - cmd.extend(flags) - - run_nbgrader(cmd, retcode=retcode) - - def _collect(self, assignment, exchange, flags=None, retcode=0): - cmd = [ - "collect", assignment, - "--course", "abc101", - "--Exchange.root={}".format(exchange) - ] - - if flags is not None: - cmd.extend(flags) - - run_nbgrader(cmd, retcode=retcode) - - @notwindows - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["fetch_feedback", "--help-all"]) - - @notwindows - def test_single_file(self, db, course_dir, exchange, cache): - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - 
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb")) - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - self._generate_assignment("ps1", course_dir, db) - self._release_and_fetch("ps1", exchange, cache, course_dir) - self._submit("ps1", exchange, cache) - self._collect("ps1", exchange) - run_nbgrader(["autograde", "ps1", "--create", "--db", db]) - run_nbgrader(["generate_feedback", "ps1", "--db", db]) - run_nbgrader(["release_feedback", "ps1", "--Exchange.root={}".format(exchange), '--course', 'abc101']) - run_nbgrader(["fetch_feedback", "ps1", "--Exchange.root={}".format(exchange), "--Exchange.cache={}".format(cache), '--course', 'abc101']) - assert os.path.isdir(join("ps1", "feedback")) - username = os.environ["USER"] - timestamp = open(join(course_dir, "submitted", username, "ps1", "timestamp.txt")).read() - assert os.path.isdir(join("ps1", "feedback", timestamp)) - assert os.path.isfile(join("ps1", "feedback", timestamp, 'p1.html')) - assert os.path.isfile(join("ps1", "feedback", timestamp, 'p1.html')) diff --git a/nbgrader/tests/apps/test_nbgrader_formgrade.py b/nbgrader/tests/apps/test_nbgrader_formgrade.py deleted file mode 100644 index 474c6cfcc..000000000 --- a/nbgrader/tests/apps/test_nbgrader_formgrade.py +++ /dev/null @@ -1,9 +0,0 @@ -from .. import run_nbgrader -from .base import BaseTestApp - - -class TestNbGraderFormgrade(BaseTestApp): - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["formgrade", "--help-all"]) diff --git a/nbgrader/tests/apps/test_nbgrader_generate_assignment.py b/nbgrader/tests/apps/test_nbgrader_generate_assignment.py deleted file mode 100644 index 34cce609a..000000000 --- a/nbgrader/tests/apps/test_nbgrader_generate_assignment.py +++ /dev/null @@ -1,350 +0,0 @@ -import os -import sys -import pytest -import traitlets - -from os.path import join -from sqlalchemy.exc import InvalidRequestError -from textwrap import dedent - -from ...api import Gradebook -from .. 
import run_nbgrader -from .base import BaseTestApp - - -class TestNbGraderGenerateAssignment(BaseTestApp): - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["generate_assignment", "--help-all"]) - - def test_no_args(self): - """Is there an error if no arguments are given?""" - run_nbgrader(["generate_assignment"], retcode=1) - - def test_conflicting_args(self): - """Is there an error if assignment is specified both in config and as an argument?""" - run_nbgrader(["generate_assignment", "--assignment", "foo", "foo"], retcode=1) - - def test_multiple_args(self): - """Is there an error if multiple arguments are given?""" - run_nbgrader(["generate_assignment", "foo", "bar"], retcode=1) - - def test_no_assignment(self, course_dir): - """Is an assignment automatically created if it doesn't exist?""" - self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb')) - - # If we explicitly disable creating assignments, assign should fail - run_nbgrader(["generate_assignment", "ps1", "--GenerateAssignment.create_assignment=False"], retcode=1) - - # The default is now to create missing assignments (formerly --create) - run_nbgrader(["generate_assignment", "ps1", "--debug"]) - - def test_single_file(self, course_dir, temp_cwd): - """Can a single file be assigned?""" - self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb')) - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_assignment", "ps1"]) - assert os.path.isfile(join(course_dir, "release", "ps1", "foo.ipynb")) - - def test_deprecation(self, course_dir, temp_cwd): - """Does the deprecated assign command still work?""" - self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb')) - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["assign", "ps1"]) - assert os.path.isfile(join(course_dir, "release", "ps1", "foo.ipynb")) - - def test_single_file_bad_assignment_name(self, course_dir, temp_cwd): - """Test that an error is thrown when the assignment name is invalid.""" - self._empty_notebook(join(course_dir, 'source', 'foo+bar', 'foo.ipynb')) - with pytest.raises(traitlets.TraitError): - run_nbgrader(["generate_assignment", "foo+bar"]) - assert not os.path.isfile(join(course_dir, "release", "foo+bar", "foo.ipynb")) - - def test_multiple_files(self, course_dir): - """Can multiple files be assigned?""" - self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb')) - self._empty_notebook(join(course_dir, 'source', 'ps1', 'bar.ipynb')) - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_assignment", "ps1"]) - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.ipynb')) - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'bar.ipynb')) - - def test_dependent_files(self, course_dir): - """Are dependent files properly linked?""" - self._make_file(join(course_dir, 'source', 'ps1', 'data', 'foo.csv'), 'foo') - self._make_file(join(course_dir, 'source', 'ps1', 'data', 'bar.csv'), 'bar') - self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb')) - self._empty_notebook(join(course_dir, 'source', 'ps1', 'bar.ipynb')) - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_assignment", "ps1"]) - - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.ipynb')) - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'bar.ipynb')) - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'data', 'foo.csv')) - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'data', 'bar.csv')) - - 
with open(join(course_dir, 'release', 'ps1', 'data', 'foo.csv'), 'r') as fh: - assert fh.read() == 'foo' - with open(join(course_dir, 'release', 'ps1', 'data', 'bar.csv'), 'r') as fh: - assert fh.read() == 'bar' - - def test_save_cells(self, db, course_dir): - """Ensure cells are saved into the database""" - self._copy_file(join('files', 'test.ipynb'), join(course_dir, 'source', 'ps1', 'test.ipynb')) - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - with Gradebook(db) as gb: - notebook = gb.find_notebook("test", "ps1") - assert len(notebook.grade_cells) == 6 - - def test_force(self, course_dir): - """Ensure the force option works properly""" - self._copy_file(join('files', 'test.ipynb'), join(course_dir, 'source', 'ps1', 'test.ipynb')) - self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), "foo") - self._make_file(join(course_dir, 'source', 'ps1', 'data', 'bar.txt'), "bar") - self._make_file(join(course_dir, 'source', 'ps1', 'blah.pyc'), "asdf") - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_assignment", "ps1"]) - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'test.ipynb')) - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt')) - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'data', 'bar.txt')) - assert not os.path.isfile(join(course_dir, 'release', 'ps1', 'blah.pyc')) - - # check that it skips the existing directory - os.remove(join(course_dir, 'release', 'ps1', 'foo.txt')) - run_nbgrader(["generate_assignment", "ps1"]) - assert not os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt')) - - # force overwrite the supplemental files - run_nbgrader(["generate_assignment", "ps1", "--force"]) - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt')) - - # force overwrite - os.remove(join(course_dir, 'source', 'ps1', 'foo.txt')) - run_nbgrader(["generate_assignment", "ps1", "--force"]) - assert os.path.isfile(join(course_dir, "release", "ps1", "test.ipynb")) - assert os.path.isfile(join(course_dir, "release", "ps1", "data", "bar.txt")) - assert not os.path.isfile(join(course_dir, "release", "ps1", "foo.txt")) - assert not os.path.isfile(join(course_dir, "release", "ps1", "blah.pyc")) - - def test_force_f(self, course_dir): - """Ensure the force option works properly""" - self._copy_file(join('files', 'test.ipynb'), join(course_dir, 'source', 'ps1', 'test.ipynb')) - self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), "foo") - self._make_file(join(course_dir, 'source', 'ps1', 'data', 'bar.txt'), "bar") - self._make_file(join(course_dir, 'source', 'ps1', 'blah.pyc'), "asdf") - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_assignment", "ps1"]) - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'test.ipynb')) - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt')) - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'data', 'bar.txt')) - assert not os.path.isfile(join(course_dir, 'release', 'ps1', 'blah.pyc')) - - # check that it skips the existing directory - os.remove(join(course_dir, 'release', 'ps1', 'foo.txt')) - run_nbgrader(["generate_assignment", "ps1"]) - assert not os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt')) - - # force overwrite the supplemental files - run_nbgrader(["generate_assignment", "ps1", "-f"]) - assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt')) - - # force overwrite - os.remove(join(course_dir, 'source', 'ps1', 
'foo.txt')) - run_nbgrader(["generate_assignment", "ps1", "-f"]) - assert os.path.isfile(join(course_dir, "release", "ps1", "test.ipynb")) - assert os.path.isfile(join(course_dir, "release", "ps1", "data", "bar.txt")) - assert not os.path.isfile(join(course_dir, "release", "ps1", "foo.txt")) - assert not os.path.isfile(join(course_dir, "release", "ps1", "blah.pyc")) - - @pytest.mark.parametrize("groupshared", [False, True]) - def test_permissions(self, course_dir, groupshared): - """Are permissions properly set?""" - self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb')) - self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), 'foo') - run_nbgrader(["db", "assignment", "add", "ps1"]) - with open("nbgrader_config.py", "a") as fh: - if groupshared: - fh.write("""c.CourseDirectory.groupshared = True\n""") - run_nbgrader(["generate_assignment", "ps1"]) - - if not groupshared: - if sys.platform == 'win32': - perms = '666' - else: - perms = '644' - else: - if sys.platform == 'win32': - perms = '666' - dirperms = '777' - else: - perms = '664' - dirperms = '2775' - - assert os.path.isfile(join(course_dir, "release", "ps1", "foo.ipynb")) - assert os.path.isfile(join(course_dir, "release", "ps1", "foo.txt")) - if groupshared: - # non-groupshared doesn't make guarantees about directory perms - assert self._get_permissions(join(course_dir, "release")) == dirperms - assert self._get_permissions(join(course_dir, "release", "ps1")) == dirperms - assert self._get_permissions(join(course_dir, "release", "ps1", "foo.ipynb")) == perms - assert self._get_permissions(join(course_dir, "release", "ps1", "foo.txt")) == perms - - def test_custom_permissions(self, course_dir): - """Are custom permissions properly set?""" - self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb')) - self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), 'foo') - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_assignment", "ps1", "--GenerateAssignment.permissions=444"]) - - assert os.path.isfile(join(course_dir, "release", "ps1", "foo.ipynb")) - assert os.path.isfile(join(course_dir, "release", "ps1", "foo.txt")) - assert self._get_permissions(join(course_dir, "release", "ps1", "foo.ipynb")) == "444" - assert self._get_permissions(join(course_dir, "release", "ps1", "foo.txt")) == "444" - - def test_add_remove_extra_notebooks(self, db, course_dir): - """Are extra notebooks added and removed?""" - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test.ipynb")) - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - with Gradebook(db) as gb: - assignment = gb.find_assignment("ps1") - assert len(assignment.notebooks) == 1 - notebook1 = gb.find_notebook("test", "ps1") - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test2.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db, "--force"]) - - gb.db.refresh(assignment) - assert len(assignment.notebooks) == 2 - gb.db.refresh(notebook1) - notebook2 = gb.find_notebook("test2", "ps1") - - os.remove(join(course_dir, "source", "ps1", "test2.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db, "--force"]) - - gb.db.refresh(assignment) - assert len(assignment.notebooks) == 1 - gb.db.refresh(notebook1) - with pytest.raises(InvalidRequestError): - gb.db.refresh(notebook2) - - def test_add_extra_notebooks_with_submissions(self, db, course_dir): - """Is an error thrown when new 
notebooks are added and there are existing submissions?""" - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test.ipynb")) - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - with Gradebook(db) as gb: - assignment = gb.find_assignment("ps1") - assert len(assignment.notebooks) == 1 - - gb.add_student("hacker123") - gb.add_submission("ps1", "hacker123") - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test2.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db, "--force"], retcode=1) - - def test_remove_extra_notebooks_with_submissions(self, db, course_dir): - """Is an error thrown when notebooks are removed and there are existing submissions?""" - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test2.ipynb")) - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - with Gradebook(db) as gb: - assignment = gb.find_assignment("ps1") - assert len(assignment.notebooks) == 2 - - gb.add_student("hacker123") - gb.add_submission("ps1", "hacker123") - - os.remove(join(course_dir, "source", "ps1", "test2.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db, "--force"], retcode=1) - - def test_same_notebooks_with_submissions(self, db, course_dir): - """Is it ok to run nbgrader generate_assignment with the same notebooks and existing submissions?""" - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test.ipynb")) - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - with Gradebook(db) as gb: - assignment = gb.find_assignment("ps1") - assert len(assignment.notebooks) == 1 - notebook = assignment.notebooks[0] - - gb.add_student("hacker123") - submission = gb.add_submission("ps1", "hacker123") - submission_notebook = submission.notebooks[0] - - run_nbgrader(["generate_assignment", "ps1", "--db", db, "--force"]) - - gb.db.refresh(assignment) - assert len(assignment.notebooks) == 1 - gb.db.refresh(notebook) - gb.db.refresh(submission) - gb.db.refresh(submission_notebook) - - def test_force_single_notebook(self, course_dir): - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb")) - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_assignment", "ps1"]) - - assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb")) - assert os.path.exists(join(course_dir, "release", "ps1", "p2.ipynb")) - p1 = self._file_contents(join(course_dir, "release", "ps1", "p1.ipynb")) - p2 = self._file_contents(join(course_dir, "release", "ps1", "p2.ipynb")) - assert p1 == p2 - - self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--notebook", "p1", "--force"]) - - assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb")) - assert os.path.exists(join(course_dir, "release", "ps1", "p2.ipynb")) - assert p1 != self._file_contents(join(course_dir, "release", "ps1", "p1.ipynb")) - assert p2 == 
self._file_contents(join(course_dir, "release", "ps1", "p2.ipynb")) - - def test_fail_no_notebooks(self): - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_assignment", "ps1"], retcode=1) - - def test_no_metadata(self, course_dir): - self._copy_file(join("files", "test-no-metadata.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - - # it should fail because of the solution and hidden test regions - run_nbgrader(["generate_assignment", "ps1", "--no-db"], retcode=1) - - # it should pass now that we're not enforcing metadata - run_nbgrader(["generate_assignment", "ps1", "--no-db", "--no-metadata"]) - assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb")) - - def test_header(self, course_dir): - """Does the relative path to the header work?""" - self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb')) - self._empty_notebook(join(course_dir, 'source', 'header.ipynb')) - run_nbgrader(["db", "assignment", "add", "ps1", "--duedate", "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["generate_assignment", "ps1"]) - assert os.path.isfile(join(course_dir, "release", "ps1", "foo.ipynb")) - - def test_trailing_slash(self, course_dir): - """Does a trailing slash on the course directory root work?""" - self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb')) - if sys.platform == 'win32': - trailing_slash = "\\\\" - path = course_dir.replace("\\", "\\\\") + trailing_slash - else: - trailing_slash = "/" - path = course_dir + trailing_slash - with open("nbgrader_config.py", "a") as fh: - fh.write("""c.CourseDirectory.root = "{}"\n""".format(path)) - run_nbgrader(["assign", "ps1"]) - assert os.path.isfile(join(course_dir, "release", "ps1", "foo.ipynb")) diff --git a/nbgrader/tests/apps/test_nbgrader_generate_config.py b/nbgrader/tests/apps/test_nbgrader_generate_config.py deleted file mode 100644 index 2753256e2..000000000 --- a/nbgrader/tests/apps/test_nbgrader_generate_config.py +++ /dev/null @@ -1,30 +0,0 @@ -import os - -from .. import run_nbgrader -from .base import BaseTestApp - - -class TestNbGraderGenerateConfig(BaseTestApp): - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["generate_config", "--help-all"]) - - def test_generate_config(self): - """Is the config file properly generated?""" - - # it already exists, because we create it in conftest.py - os.remove("nbgrader_config.py") - - # try recreating it - run_nbgrader(["generate_config"]) - assert os.path.isfile("nbgrader_config.py") - - with open("nbgrader_config.py") as f: - contents = f.read() - - # This was missing in issue #1089 - assert "AssignLatePenalties" in contents - - # does it fail if it already exists? - run_nbgrader(["generate_config"], retcode=1) diff --git a/nbgrader/tests/apps/test_nbgrader_generate_feedback.py b/nbgrader/tests/apps/test_nbgrader_generate_feedback.py deleted file mode 100644 index 6b59bfab9..000000000 --- a/nbgrader/tests/apps/test_nbgrader_generate_feedback.py +++ /dev/null @@ -1,348 +0,0 @@ -import os -import sys -import pytest -from os.path import join, exists, isfile - -from ...utils import remove -from ..
import run_nbgrader -from .base import BaseTestApp - - -class TestNbGraderFeedback(BaseTestApp): - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["generate_feedback", "--help-all"]) - - def test_deprecated(self, db, course_dir): - """Does the deprecated feedback command still work?""" - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db]) - run_nbgrader(["feedback", "ps1", "--db", db]) - - assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - - def test_single_file(self, db, course_dir): - """Can feedback be generated for an unchanged assignment?""" - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["assign", "ps1", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db]) - run_nbgrader(["generate_feedback", "ps1", "--db", db]) - - assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - - def test_student_id_exclude(self, db, course_dir): - """Does --CourseDirectory.student_id_exclude=X exclude students?""" - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - run_nbgrader(["db", "student", "add", "baz", "--db", db]) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["assign", "ps1", "--db", db]) - - for student in ["foo", "bar", "baz"]: - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", student, "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db]) - run_nbgrader(["generate_feedback", "ps1", "--db", db, "--CourseDirectory.student_id_exclude=bar,baz"]) - - for student in ["foo", "bar", "baz"]: - assert exists(join(course_dir, "autograded", student, "ps1", "p1.ipynb")) - - assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert not exists(join(course_dir, "feedback", "bar", "ps1", "p1.html")) - assert not exists(join(course_dir, "feedback", "baz", "ps1", "p1.html")) - - - def test_force(self, db, course_dir): - """Ensure the force option works properly""" - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo") - self._make_file(join(course_dir, "source", "ps1", "data", "bar.txt"), "bar") - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._make_file(join(course_dir, "submitted", "foo", "ps1", "foo.txt"), "foo") - self._make_file(join(course_dir,
"submitted", "foo", "ps1", "data", "bar.txt"), "bar") - run_nbgrader(["autograde", "ps1", "--db", db]) - - self._make_file(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"), "asdf") - run_nbgrader(["generate_feedback", "ps1", "--db", db]) - - assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt")) - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc")) - - # check that it skips the existing directory - remove(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - run_nbgrader(["generate_feedback", "ps1", "--db", db]) - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - - # force overwrite the supplemental files - run_nbgrader(["generate_feedback", "ps1", "--db", db, "--force"]) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - - # force overwrite - remove(join(course_dir, "autograded", "foo", "ps1", "foo.txt")) - run_nbgrader(["generate_feedback", "ps1", "--db", db, "--force"]) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt")) - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc")) - - def test_force_f(self, db, course_dir): - """Ensure the force option works properly""" - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo") - self._make_file(join(course_dir, "source", "ps1", "data", "bar.txt"), "bar") - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._make_file(join(course_dir, "submitted", "foo", "ps1", "foo.txt"), "foo") - self._make_file(join(course_dir, "submitted", "foo", "ps1", "data", "bar.txt"), "bar") - run_nbgrader(["autograde", "ps1", "--db", db]) - - self._make_file(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"), "asdf") - run_nbgrader(["generate_feedback", "ps1", "--db", db]) - - assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt")) - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc")) - - # check that it skips the existing directory - remove(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - run_nbgrader(["generate_feedback", "ps1", "--db", db]) - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - - # force overwrite the supplemental files - run_nbgrader(["generate_feedback", "ps1", "--db", db, "-f"]) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - - # force overwrite - remove(join(course_dir, "autograded", "foo", "ps1", "foo.txt")) - run_nbgrader(["generate_feedback", "ps1", "--db", db, "--force"]) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt")) - 
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc")) - - def test_filter_notebook(self, db, course_dir): - """Does feedback filter by notebook properly?""" - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo") - self._make_file(join(course_dir, "source", "ps1", "data", "bar.txt"), "bar") - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._make_file(join(course_dir, "submitted", "foo", "ps1", "foo.txt"), "foo") - self._make_file(join(course_dir, "submitted", "foo", "ps1", "data", "bar.txt"), "bar") - self._make_file(join(course_dir, "submitted", "foo", "ps1", "blah.pyc"), "asdf") - run_nbgrader(["autograde", "ps1", "--db", db]) - run_nbgrader(["generate_feedback", "ps1", "--db", db, "--notebook", "p1"]) - - assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt")) - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc")) - - # check that removing the notebook still causes it to run - remove(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - remove(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - run_nbgrader(["generate_feedback", "ps1", "--db", db, "--notebook", "p1"]) - - assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt")) - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc")) - - # check that running it again doesn't do anything - remove(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - run_nbgrader(["generate_feedback", "ps1", "--db", db, "--notebook", "p1"]) - - assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt")) - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc")) - - # check that removing the notebook doesn't cause it to run - remove(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - run_nbgrader(["generate_feedback", "ps1", "--db", db]) - - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt")) - assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc")) - - @pytest.mark.parametrize("groupshared", [False, True]) - def test_permissions(self, course_dir, groupshared): - """Are permissions properly set?""" - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["db", "student", "add", "foo"]) - with open("nbgrader_config.py", "a") as fh: - if groupshared: - fh.write("""c.CourseDirectory.groupshared = True\n""") - self._empty_notebook(join(course_dir, "source", "ps1", "foo.ipynb")) - run_nbgrader(["generate_assignment", "ps1"]) - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "foo.ipynb")) -
run_nbgrader(["autograde", "ps1"]) - run_nbgrader(["generate_feedback", "ps1"]) - - if not groupshared: - if sys.platform == 'win32': - perms = '666' - else: - perms = '644' - else: - if sys.platform == 'win32': - perms = '666' - dirperms = '777' - else: - perms = '664' - dirperms = '2775' - - assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.html")) - if groupshared: - # non-groupshared doesn't guarantee anything about directory perms - assert self._get_permissions(join(course_dir, "feedback", "foo", "ps1")) == dirperms - assert self._get_permissions(join(course_dir, "feedback", "foo", "ps1", "foo.html")) == perms - - def test_custom_permissions(self, course_dir): - """Are custom permissions properly set?""" - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["db", "student", "add", "foo"]) - self._empty_notebook(join(course_dir, "source", "ps1", "foo.ipynb")) - run_nbgrader(["generate_assignment", "ps1"]) - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "foo.ipynb")) - run_nbgrader(["autograde", "ps1"]) - run_nbgrader(["generate_feedback", "ps1", "--GenerateFeedback.permissions=444"]) - - assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.html")) - assert self._get_permissions(join(course_dir, "feedback", "foo", "ps1", "foo.html")) == '444' - - def test_force_single_notebook(self, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["db", "student", "add", "foo"]) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb")) - run_nbgrader(["generate_assignment", "ps1"]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p2.ipynb")) - run_nbgrader(["autograde", "ps1"]) - run_nbgrader(["generate_feedback", "ps1"]) - - assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert exists(join(course_dir, "feedback", "foo", "ps1", "p2.html")) - p1 = self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - p2 = self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p2.html")) - - self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb")) - run_nbgrader(["generate_feedback", "ps1", "--notebook", "p1", "--force"]) - - assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert exists(join(course_dir, "feedback", "foo", "ps1", "p2.html")) - assert p1 != self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert p2 == self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p2.html")) - - def test_update_newer(self, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["db", "student", "add", "foo"]) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1"]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 America/Los_Angeles") - run_nbgrader(["autograde", "ps1"]) - run_nbgrader(["generate_feedback", "ps1"]) - - assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - 
assert isfile(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) - assert self._file_contents(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) == "2015-02-02 15:58:23.948203 America/Los_Angeles" - p = self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - - self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - self._make_file(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"), "2015-02-02 16:58:23.948203 America/Los_Angeles") - run_nbgrader(["generate_feedback", "ps1"]) - - assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) - assert self._file_contents(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) == "2015-02-02 16:58:23.948203 America/Los_Angeles" - assert p != self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - - def test_update_newer_single_notebook(self, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["db", "student", "add", "foo"]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb")) - run_nbgrader(["generate_assignment", "ps1"]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p2.ipynb")) - self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 America/Los_Angeles") - run_nbgrader(["autograde", "ps1"]) - run_nbgrader(["generate_feedback", "ps1"]) - - assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert exists(join(course_dir, "feedback", "foo", "ps1", "p2.html")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) - assert self._file_contents(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) == "2015-02-02 15:58:23.948203 America/Los_Angeles" - p1 = self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - p2 = self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p2.html")) - - self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb")) - self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "p2.ipynb")) - self._make_file(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"), "2015-02-02 16:58:23.948203 America/Los_Angeles") - run_nbgrader(["generate_feedback", "ps1", "--notebook", "p1"]) - - assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert exists(join(course_dir, "feedback", "foo", "ps1", "p2.html")) - assert isfile(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) - assert self._file_contents(join(course_dir, "feedback", "foo", "ps1", "timestamp.txt")) == "2015-02-02 16:58:23.948203 America/Los_Angeles" - assert p1 != self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert p2 == self._file_contents(join(course_dir, "feedback", "foo", "ps1", "p2.html")) - - def test_single_user(self, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo"]) - run_nbgrader(["db", "student", "add", "bar"]) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - 
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb")) - run_nbgrader(["assign", "ps1"]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p2.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb")) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p2.ipynb")) - run_nbgrader(["autograde", "ps1"]) - run_nbgrader(["generate_feedback", "ps1", "--student", "foo"]) - - assert exists(join(course_dir, "feedback", "foo", "ps1", "p1.html")) - assert exists(join(course_dir, "feedback", "foo", "ps1", "p2.html")) - assert not exists(join(course_dir, "feedback", "bar", "ps1", "p1.html")) - assert not exists(join(course_dir, "feedback", "bar", "ps1", "p2.html")) diff --git a/nbgrader/tests/apps/test_nbgrader_generate_solution.py b/nbgrader/tests/apps/test_nbgrader_generate_solution.py deleted file mode 100644 index 59058e659..000000000 --- a/nbgrader/tests/apps/test_nbgrader_generate_solution.py +++ /dev/null @@ -1,142 +0,0 @@ -import os -import sys -import pytest -import traitlets - -from os.path import join -from sqlalchemy.exc import InvalidRequestError -from textwrap import dedent - -from ...api import Gradebook -from .. import run_nbgrader -from .base import BaseTestApp - - -class TestNbGraderGenerateSolution(BaseTestApp): - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["generate_solution", "--help-all"]) - - def test_no_args(self): - """Is there an error if no arguments are given?""" - run_nbgrader(["generate_solution"], retcode=1) - - def test_conflicting_args(self): - """Is there an error if assignment is specified both in config and as an argument?""" - run_nbgrader(["generate_solution", "--assignment", "foo", "foo"], retcode=1) - - def test_multiple_args(self): - """Is there an error if multiple arguments are given?""" - run_nbgrader(["generate_solution", "foo", "bar"], retcode=1) - - def test_no_assignment(self, course_dir): - """If an assignment does not exists it fails""" - self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb')) - run_nbgrader(["generate_solution", "ps1"], retcode=1) - - def test_assignment(self, course_dir): - self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb')) - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_solution", "ps1"]) - assert os.path.isfile(join(course_dir, "solution", "ps1", "foo.ipynb")) - - def test_single_file_bad_assignment_name(self, course_dir, temp_cwd): - """Test that an error is thrown when the assignment name is invalid.""" - self._empty_notebook(join(course_dir, 'source', 'foo+bar', 'foo.ipynb')) - with pytest.raises(traitlets.TraitError): - run_nbgrader(["generate_solution", "foo+bar"]) - assert not os.path.isfile(join(course_dir, "solution", "foo+bar", "foo.ipynb")) - - def test_multiple_files(self, course_dir): - """Can multiple files be assigned?""" - self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb')) - self._empty_notebook(join(course_dir, 'source', 'ps1', 'bar.ipynb')) - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_solution", "ps1"]) - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'foo.ipynb')) - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'bar.ipynb')) - - def test_dependent_files(self, course_dir): 
- """Are dependent files properly linked?""" - self._make_file(join(course_dir, 'source', 'ps1', 'data', 'foo.csv'), 'foo') - self._make_file(join(course_dir, 'source', 'ps1', 'data', 'bar.csv'), 'bar') - self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb')) - self._empty_notebook(join(course_dir, 'source', 'ps1', 'bar.ipynb')) - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_solution", "ps1"]) - - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'foo.ipynb')) - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'bar.ipynb')) - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'data', 'foo.csv')) - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'data', 'bar.csv')) - - with open(join(course_dir, 'solution', 'ps1', 'data', 'foo.csv'), 'r') as fh: - assert fh.read() == 'foo' - with open(join(course_dir, 'solution', 'ps1', 'data', 'bar.csv'), 'r') as fh: - assert fh.read() == 'bar' - - def test_force(self, course_dir): - """Ensure the force option works properly""" - self._copy_file(join('files', 'test.ipynb'), join(course_dir, 'source', 'ps1', 'test.ipynb')) - self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), "foo") - self._make_file(join(course_dir, 'source', 'ps1', 'data', 'bar.txt'), "bar") - self._make_file(join(course_dir, 'source', 'ps1', 'blah.pyc'), "asdf") - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_solution", "ps1"]) - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'test.ipynb')) - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'foo.txt')) - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'data', 'bar.txt')) - assert not os.path.isfile(join(course_dir, 'solution', 'ps1', 'blah.pyc')) - - # check that it skips the existing directory - os.remove(join(course_dir, 'solution', 'ps1', 'foo.txt')) - run_nbgrader(["generate_solution", "ps1"]) - assert not os.path.isfile(join(course_dir, 'solution', 'ps1', 'foo.txt')) - - # force overwrite the supplemental files - run_nbgrader(["generate_solution", "ps1", "--force"]) - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'foo.txt')) - - # force overwrite - os.remove(join(course_dir, 'source', 'ps1', 'foo.txt')) - run_nbgrader(["generate_solution", "ps1", "--force"]) - assert os.path.isfile(join(course_dir, "solution", "ps1", "test.ipynb")) - assert os.path.isfile(join(course_dir, "solution", "ps1", "data", "bar.txt")) - assert not os.path.isfile(join(course_dir, "solution", "ps1", "foo.txt")) - assert not os.path.isfile(join(course_dir, "solution", "ps1", "blah.pyc")) - - def test_force_f(self, course_dir): - """Ensure the force option works properly""" - self._copy_file(join('files', 'test.ipynb'), join(course_dir, 'source', 'ps1', 'test.ipynb')) - self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), "foo") - self._make_file(join(course_dir, 'source', 'ps1', 'data', 'bar.txt'), "bar") - self._make_file(join(course_dir, 'source', 'ps1', 'blah.pyc'), "asdf") - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_solution", "ps1"]) - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'test.ipynb')) - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'foo.txt')) - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'data', 'bar.txt')) - assert not os.path.isfile(join(course_dir, 'solution', 'ps1', 'blah.pyc')) - - # check that it skips the existing directory - os.remove(join(course_dir, 'solution', 'ps1', 'foo.txt')) - 
run_nbgrader(["generate_solution", "ps1"]) - assert not os.path.isfile(join(course_dir, 'solution', 'ps1', 'foo.txt')) - - # force overwrite the supplemental files - run_nbgrader(["generate_solution", "ps1", "-f"]) - assert os.path.isfile(join(course_dir, 'solution', 'ps1', 'foo.txt')) - - # force overwrite - os.remove(join(course_dir, 'source', 'ps1', 'foo.txt')) - run_nbgrader(["generate_solution", "ps1", "-f"]) - assert os.path.isfile(join(course_dir, "solution", "ps1", "test.ipynb")) - assert os.path.isfile(join(course_dir, "solution", "ps1", "data", "bar.txt")) - assert not os.path.isfile(join(course_dir, "solution", "ps1", "foo.txt")) - assert not os.path.isfile(join(course_dir, "solution", "ps1", "blah.pyc")) - - def test_fail_no_notebooks(self): - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["generate_solution", "ps1"], retcode=1) - diff --git a/nbgrader/tests/apps/test_nbgrader_list.py b/nbgrader/tests/apps/test_nbgrader_list.py deleted file mode 100644 index 2018f7a47..000000000 --- a/nbgrader/tests/apps/test_nbgrader_list.py +++ /dev/null @@ -1,500 +0,0 @@ -import os -import time - -from textwrap import dedent - -from .. import run_nbgrader -from .base import BaseTestApp -from .conftest import notwindows - -from ...utils import get_username - - -@notwindows -class TestNbGraderList(BaseTestApp): - - def _release(self, assignment, exchange, cache, course_dir, course="abc101"): - self._copy_file(os.path.join("files", "test.ipynb"), os.path.join(course_dir, "release", assignment, "p1.ipynb")) - run_nbgrader([ - "release_assignment", assignment, - "--course", course, - "--CourseDirectory.root={}".format(course_dir), - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ]) - - def _release_full(self, assignment, exchange, cache, course_dir, course="abc101"): - self._copy_file(os.path.join("files", "test.ipynb"), os.path.join(course_dir, "source", assignment, "p1.ipynb")) - run_nbgrader([ - "generate_assignment", assignment, - "--course", course, - "--CourseDirectory.root={}".format(course_dir), - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ]) - run_nbgrader([ - "release_assignment", assignment, - "--course", course, - "--CourseDirectory.root={}".format(course_dir), - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ]) - - def _fetch(self, assignment, exchange, cache, course="abc101", flags=None): - cmd = [ - "fetch_assignment", assignment, - "--course", course, - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ] - - if flags is not None: - cmd.extend(flags) - - run_nbgrader(cmd) - - def _submit(self, assignment, exchange, cache, course="abc101", flags=None): - cmd = [ - "submit", assignment, - "--course", course, - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ] - - if flags is not None: - cmd.extend(flags) - - run_nbgrader(cmd) - - def _make_feedback(self, assignment, exchange, cache, course_dir, course="abc101"): - run_nbgrader([ - "collect", assignment, - "--update", - "--course", course, - "--CourseDirectory.root={}".format(course_dir), - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ]) - run_nbgrader([ - "autograde", assignment, - "--course", course, - "--CourseDirectory.root={}".format(course_dir), - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ]) - run_nbgrader([ - "generate_feedback", assignment, - "--course", course, - 
"--CourseDirectory.root={}".format(course_dir), - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ]) - run_nbgrader([ - "release_feedback", assignment, - "--course", course, - "--CourseDirectory.root={}".format(course_dir), - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ]) - - def _fetch_feedback(self, assignment, exchange, cache, course="abc101", flags=None): - cmd = [ - "fetch_feedback", assignment, - "--course", course, - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ] - - if flags is not None: - cmd.extend(flags) - - run_nbgrader(cmd) - - def _list(self, exchange, cache, assignment=None, flags=None, retcode=0): - cmd = [ - "list", - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange), - ] - - if flags is not None: - cmd.extend(flags) - if assignment is not None: - cmd.append(assignment) - - return run_nbgrader(cmd, retcode=retcode, stdout=False) - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["list", "--help-all"]) - - def test_list_released(self, exchange, cache, course_dir, fake_home_dir): - self._release("ps1", exchange, cache, course_dir) - self._release("ps1", exchange, cache, course_dir, course="xyz200") - output = self._list(exchange, cache, "ps1", flags=["--course", "abc101"]) - assert output == dedent( - """ - [ListApp | INFO] Released assignments: - [ListApp | INFO] abc101 ps1 - """ - ).lstrip() - assert self._list(exchange, cache, "ps1", flags=["--course", "xyz200"]) == dedent( - """ - [ListApp | INFO] Released assignments: - [ListApp | INFO] xyz200 ps1 - """ - ).lstrip() - assert self._list(exchange, cache, "ps1") == dedent( - """ - [ListApp | INFO] Released assignments: - [ListApp | INFO] abc101 ps1 - [ListApp | INFO] xyz200 ps1 - """ - ).lstrip() - - self._release("ps2", exchange, cache, course_dir) - self._release("ps2", exchange, cache, course_dir, course="xyz200") - assert self._list(exchange, cache, "ps2") == dedent( - """ - [ListApp | INFO] Released assignments: - [ListApp | INFO] abc101 ps2 - [ListApp | INFO] xyz200 ps2 - """ - ).lstrip() - - assert self._list(exchange, cache) == dedent( - """ - [ListApp | INFO] Released assignments: - [ListApp | INFO] abc101 ps1 - [ListApp | INFO] abc101 ps2 - [ListApp | INFO] xyz200 ps1 - [ListApp | INFO] xyz200 ps2 - """ - ).lstrip() - - def test_list_fetched(self, exchange, cache, course_dir): - self._release("ps1", exchange, cache, course_dir) - self._release("ps2", exchange, cache, course_dir) - self._fetch("ps1", exchange, cache) - assert self._list(exchange, cache) == dedent( - """ - [ListApp | INFO] Released assignments: - [ListApp | INFO] abc101 ps1 (already downloaded) - [ListApp | INFO] abc101 ps2 - """ - ).lstrip() - - def test_list_remove_outbound(self, exchange, cache, course_dir): - self._release("ps1", exchange, cache, course_dir) - self._release("ps2", exchange, cache, course_dir) - self._list(exchange, cache, "ps1", flags=["--remove"]) - assert self._list(exchange, cache) == dedent( - """ - [ListApp | INFO] Released assignments: - [ListApp | INFO] abc101 ps2 - """ - ).lstrip() - - self._list(exchange, cache, "ps2", flags=["--remove"]) - assert self._list(exchange, cache, "ps2") == dedent( - """ - [ListApp | INFO] Released assignments: - """ - ).lstrip() - - def test_list_inbound(self, exchange, cache, course_dir): - self._release("ps1", exchange, cache, course_dir) - - assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent( - """ - 
[ListApp | INFO] Submitted assignments: - """ - ).lstrip() - - self._fetch("ps1", exchange, cache) - self._submit("ps1", exchange, cache) - filename, = os.listdir(os.path.join(exchange, "abc101", "inbound")) - timestamp = filename.split("+")[2] - assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - """.format(get_username(), timestamp) - ).lstrip() - - time.sleep(1) - self._submit("ps1", exchange, cache) - filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound"))) - timestamps = [x.split("+")[2] for x in filenames] - assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - """.format(get_username(), timestamps[0], get_username(), timestamps[1]) - ).lstrip() - - def test_list_inbound_no_random_string(self, exchange, cache, course_dir): - self._release("ps1", exchange, cache, course_dir) - - assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - """ - ).lstrip() - - self._fetch("ps1", exchange, cache) - self._submit("ps1", exchange, cache, flags=["--ExchangeSubmit.add_random_string=False"]) - filename, = os.listdir(os.path.join(exchange, "abc101", "inbound")) - timestamp = filename.split("+")[2] - assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - """.format(get_username(), timestamp) - ).lstrip() - - time.sleep(1) - self._submit("ps1", exchange, cache, flags=["--ExchangeSubmit.add_random_string=False"]) - filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound"))) - timestamps = [x.split("+")[2] for x in filenames] - assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - """.format(get_username(), timestamps[0], get_username(), timestamps[1]) - ).lstrip() - - def test_list_cached(self, exchange, cache, course_dir): - self._release("ps1", exchange, cache, course_dir) - - assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - """ - ).lstrip() - - self._fetch("ps1", exchange, cache) - self._submit("ps1", exchange, cache) - filename, = os.listdir(os.path.join(cache, "abc101")) - timestamp = filename.split("+")[2] - assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - """.format(get_username(), timestamp) - ).lstrip() - - time.sleep(1) - self._submit("ps1", exchange, cache) - self._list(exchange, cache, "ps1", flags=["--inbound", "--remove"]) - filenames = sorted(os.listdir(os.path.join(cache, "abc101"))) - timestamps = [x.split("+")[2] for x in filenames] - assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - """.format(get_username(), timestamps[0], get_username(), timestamps[1]) - 
).lstrip() - - def test_list_remove_inbound(self, exchange, cache, course_dir): - self._release("ps1", exchange, cache, course_dir) - self._fetch("ps1", exchange, cache) - self._release("ps2", exchange, cache, course_dir) - self._fetch("ps2", exchange, cache) - - self._submit("ps1", exchange, cache) - self._submit("ps2", exchange, cache) - filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound"))) - timestamps = [x.split("+")[2] for x in filenames] - - self._list(exchange, cache, "ps1", flags=["--inbound", "--remove"]) - assert self._list(exchange, cache, flags=["--inbound"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps2 {} (no feedback available) - """.format(get_username(), timestamps[1]) - ).lstrip() - assert len(os.listdir(os.path.join(exchange, "abc101", "inbound"))) == 1 - - self._list(exchange, cache, "ps2", flags=["--inbound", "--remove"]) - assert self._list(exchange, cache, flags=["--inbound"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - """ - ).lstrip() - assert len(os.listdir(os.path.join(exchange, "abc101", "inbound"))) == 0 - - def test_list_remove_cached(self, exchange, cache, course_dir): - self._release("ps1", exchange, cache, course_dir) - self._fetch("ps1", exchange, cache) - self._release("ps2", exchange, cache, course_dir) - self._fetch("ps2", exchange, cache) - - self._submit("ps1", exchange, cache) - self._submit("ps2", exchange, cache) - filenames = sorted(os.listdir(os.path.join(cache, "abc101"))) - timestamps = [x.split("+")[2] for x in filenames] - - self._list(exchange, cache, "ps1", flags=["--cached", "--remove"]) - assert self._list(exchange, cache, flags=["--cached"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps2 {} (no feedback available) - """.format(get_username(), timestamps[1]) - ).lstrip() - assert len(os.listdir(os.path.join(cache, "abc101"))) == 1 - - self._list(exchange, cache, "ps2", flags=["--cached", "--remove"]) - assert self._list(exchange, cache, flags=["--cached"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - """ - ).lstrip() - assert len(os.listdir(os.path.join(cache, "abc101"))) == 0 - - def test_list_cached_and_inbound(self, exchange, cache): - self._list(exchange, cache, flags=["--inbound", "--cached"], retcode=1) - - def test_list_without_random_string(self, exchange, cache, course_dir): - self._release("ps1", exchange, cache, course_dir) - self._fetch("ps1", exchange, cache) - - self._submit("ps1", exchange, cache, flags=["--ExchangeSubmit.add_random_string=False"]) - - filename, = os.listdir(os.path.join(exchange, "abc101", "inbound")) - timestamp = filename.split("+")[2] - assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - """.format(get_username(), timestamp) - ).lstrip() - - def test_list_feedback_inbound(self, exchange, cache, course_dir): - self._release_full("ps1", exchange, cache, course_dir) - self._fetch("ps1", exchange, cache) - self._submit("ps1", exchange, cache) - self._make_feedback("ps1", exchange, cache, course_dir) - time.sleep(1) - self._submit("ps1", exchange, cache) - - filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound"))) - timestamps = [x.split("+")[2] for x in filenames] - assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} 
ps1 {} (feedback ready to be fetched) - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - """.format(get_username(), timestamps[0], get_username(), timestamps[1]) - ).lstrip() - - self._fetch_feedback("ps1", exchange, cache) - filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound"))) - timestamps = [x.split("+")[2] for x in filenames] - assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (feedback already fetched) - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - """.format(get_username(), timestamps[0], get_username(), timestamps[1]) - ).lstrip() - - exchange_path = os.path.join(exchange, "abc101", "feedback") - feedback_file, = os.listdir(exchange_path) - with open(os.path.join(exchange_path, feedback_file), "a") as fh: - fh.write("blahblahblah") - assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (feedback ready to be fetched) - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - """.format(get_username(), timestamps[0], get_username(), timestamps[1]) - ).lstrip() - - self._make_feedback("ps1", exchange, cache, course_dir) - filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound"))) - timestamps = [x.split("+")[2] for x in filenames] - assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (feedback ready to be fetched) - [ListApp | INFO] abc101 {} ps1 {} (feedback ready to be fetched) - """.format(get_username(), timestamps[0], get_username(), timestamps[1]) - ).lstrip() - - self._fetch_feedback("ps1", exchange, cache) - filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound"))) - timestamps = [x.split("+")[2] for x in filenames] - assert self._list(exchange, cache, "ps1", flags=["--inbound"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (feedback already fetched) - [ListApp | INFO] abc101 {} ps1 {} (feedback already fetched) - """.format(get_username(), timestamps[0], get_username(), timestamps[1]) - ).lstrip() - - def test_list_feedback_cached(self, exchange, cache, course_dir): - self._release_full("ps1", exchange, cache, course_dir) - self._fetch("ps1", exchange, cache) - self._submit("ps1", exchange, cache) - self._make_feedback("ps1", exchange, cache, course_dir) - time.sleep(1) - self._submit("ps1", exchange, cache) - - filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound"))) - timestamps = [x.split("+")[2] for x in filenames] - assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (feedback ready to be fetched) - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - """.format(get_username(), timestamps[0], get_username(), timestamps[1]) - ).lstrip() - - self._fetch_feedback("ps1", exchange, cache) - filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound"))) - timestamps = [x.split("+")[2] for x in filenames] - assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (feedback already fetched) - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - """.format(get_username(), timestamps[0], 
get_username(), timestamps[1]) - ).lstrip() - - exchange_path = os.path.join(exchange, "abc101", "feedback") - feedback_file, = os.listdir(exchange_path) - with open(os.path.join(exchange_path, feedback_file), "a") as fh: - fh.write("blahblahblah") - assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (feedback ready to be fetched) - [ListApp | INFO] abc101 {} ps1 {} (no feedback available) - """.format(get_username(), timestamps[0], get_username(), timestamps[1]) - ).lstrip() - - self._make_feedback("ps1", exchange, cache, course_dir) - filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound"))) - timestamps = [x.split("+")[2] for x in filenames] - assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (feedback ready to be fetched) - [ListApp | INFO] abc101 {} ps1 {} (feedback ready to be fetched) - """.format(get_username(), timestamps[0], get_username(), timestamps[1]) - ).lstrip() - - self._fetch_feedback("ps1", exchange, cache) - filenames = sorted(os.listdir(os.path.join(exchange, "abc101", "inbound"))) - timestamps = [x.split("+")[2] for x in filenames] - assert self._list(exchange, cache, "ps1", flags=["--cached"]) == dedent( - """ - [ListApp | INFO] Submitted assignments: - [ListApp | INFO] abc101 {} ps1 {} (feedback already fetched) - [ListApp | INFO] abc101 {} ps1 {} (feedback already fetched) - """.format(get_username(), timestamps[0], get_username(), timestamps[1]) - ).lstrip() diff --git a/nbgrader/tests/apps/test_nbgrader_quickstart.py b/nbgrader/tests/apps/test_nbgrader_quickstart.py deleted file mode 100644 index 57c67934b..000000000 --- a/nbgrader/tests/apps/test_nbgrader_quickstart.py +++ /dev/null @@ -1,119 +0,0 @@ -import os -import shutil - -from .. import run_nbgrader -from .base import BaseTestApp - - -class TestNbGraderQuickStart(BaseTestApp): - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["quickstart", "--help-all"]) - - def test_no_course_id(self): - """Is the help displayed when no course id is given?""" - run_nbgrader(["quickstart"], retcode=1) - - def test_quickstart(self, fake_home_dir): - """Is the quickstart example properly generated?""" - - run_nbgrader(["quickstart", "example"]) - - # it should fail if it already exists - run_nbgrader(["quickstart", "example"], retcode=1) - - # it should succeed if --force is given - os.remove(os.path.join("example", "nbgrader_config.py")) - run_nbgrader(["quickstart", "example", "--force"]) - assert os.path.exists(os.path.join("example", "nbgrader_config.py")) - - # nbgrader validate should work - os.chdir("example") - for nb in os.listdir(os.path.join("source", "ps1")): - if not nb.endswith(".ipynb"): - continue - output = run_nbgrader(["validate", os.path.join("source", "ps1", nb)], stdout=True) - assert output.strip() == "Success! Your notebook passes all the tests." 
- - # nbgrader generate_assignment should work - run_nbgrader(["generate_assignment", "ps1"]) - - def test_quickstart_overwrite_course_folder_if_structure_not_present(self): - """Is the quickstart example properly generated?""" - - run_nbgrader(["quickstart", "example_without_folder_and_config_file"]) - - # it should fail if it already exists - run_nbgrader(["quickstart", "example_without_folder_and_config_file"], retcode=1) - - # should succeed if both source folder and config file are not present. - shutil.rmtree(os.path.join("example_without_folder_and_config_file", "source")) - os.remove(os.path.join("example_without_folder_and_config_file", "nbgrader_config.py")) - - run_nbgrader(["quickstart", "example_without_folder_and_config_file"]) - assert os.path.exists(os.path.join("example_without_folder_and_config_file", "nbgrader_config.py")) - assert os.path.exists(os.path.join("example_without_folder_and_config_file", "source")) - - # nbgrader validate should work - os.chdir("example_without_folder_and_config_file") - for nb in os.listdir(os.path.join("source", "ps1")): - if not nb.endswith(".ipynb"): - continue - output = run_nbgrader(["validate", os.path.join("source", "ps1", nb)], stdout=True) - assert output.strip() == "Success! Your notebook passes all the tests." - - # nbgrader generate_assignment should work - run_nbgrader(["generate_assignment", "ps1"]) - - def test_quickstart_fails_with_source_folder_removed(self): - """Is the quickstart example properly generated if the source folder is removed?""" - - run_nbgrader(["quickstart", "example_source_folder_fail"]) - - # it should fail if it already exists - run_nbgrader(["quickstart", "example_source_folder_fail"], retcode=1) - - # remove the source folder only - shutil.rmtree(os.path.join("example_source_folder_fail", "source")) - - # it should fail if either the source folder or the config file already exists - run_nbgrader(["quickstart", "example_source_folder_fail"], retcode=1) - - def test_quickstart_fails_with_config_file_removed(self): - """Is the quickstart example properly generated if the config file is removed?""" - - run_nbgrader(["quickstart", "example_source_folder_fail"]) - - # it should fail if it already exists - run_nbgrader(["quickstart", "example_source_folder_fail"], retcode=1) - - # remove the config file only - os.remove(os.path.join("example_source_folder_fail", "nbgrader_config.py")) - - # it should fail if either the source folder or the config file already exists - run_nbgrader(["quickstart", "example_source_folder_fail"], retcode=1) - - def test_quickstart_f(self): - """Is the quickstart example properly generated with the -f flag?""" - - run_nbgrader(["quickstart", "example"]) - - # it should fail if it already exists - run_nbgrader(["quickstart", "example"], retcode=1) - - # it should succeed if -f is given - os.remove(os.path.join("example", "nbgrader_config.py")) - run_nbgrader(["quickstart", "example", "-f"]) - assert os.path.exists(os.path.join("example", "nbgrader_config.py")) - - # nbgrader validate should work - os.chdir("example") - for nb in os.listdir(os.path.join("source", "ps1")): - if not nb.endswith(".ipynb"): - continue - output = run_nbgrader(["validate", os.path.join("source", "ps1", nb)], stdout=True) - assert output.strip() == "Success! Your notebook passes all the tests." 
- - # nbgrader generate_assignment should work - run_nbgrader(["generate_assignment", "ps1"]) diff --git a/nbgrader/tests/apps/test_nbgrader_releaseassignment.py b/nbgrader/tests/apps/test_nbgrader_releaseassignment.py deleted file mode 100644 index ce29b87aa..000000000 --- a/nbgrader/tests/apps/test_nbgrader_releaseassignment.py +++ /dev/null @@ -1,106 +0,0 @@ -import os -import shutil -import stat -import pytest -from os.path import join - -from .. import run_nbgrader -from .base import BaseTestApp -from .conftest import notwindows - - -@notwindows -class TestNbGraderRelease(BaseTestApp): - - def _release(self, assignment, exchange, flags=None, retcode=0): - cmd = [ - "release_assignment", assignment, - "--course", "abc101", - "--Exchange.root={}".format(exchange) - ] - - if flags is not None: - cmd.extend(flags) - - run_nbgrader(cmd, retcode=retcode) - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["release_assignment", "--help-all"]) - - def test_no_course_id(self, exchange): - """Does releasing without a course id throw an error?""" - cmd = [ - "release_assignment", "ps1", - "--Exchange.root={}".format(exchange) - ] - run_nbgrader(cmd, retcode=1) - - def test_release(self, exchange, course_dir): - self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb")) - self._release("ps1", exchange) - assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb")) - - def test_release_deprecated(self, exchange, course_dir): - self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb")) - run_nbgrader([ - "release_assignment", "ps1", - "--course", "abc101", - "--Exchange.root={}".format(exchange) - ]) - assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb")) - - def test_force_release(self, exchange, course_dir): - self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb")) - self._release("ps1", exchange) - assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb")) - - self._release("ps1", exchange, retcode=1) - - os.remove(join(exchange, join("abc101", "outbound", "ps1", "p1.ipynb"))) - self._release("ps1", exchange, retcode=1) - - self._release("ps1", exchange, flags=["--force"]) - assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb")) - - def test_force_release_f(self, exchange, course_dir): - self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb")) - self._release("ps1", exchange) - assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb")) - - self._release("ps1", exchange, retcode=1) - - os.remove(join(exchange, join("abc101", "outbound", "ps1", "p1.ipynb"))) - self._release("ps1", exchange, retcode=1) - - self._release("ps1", exchange, flags=["-f"]) - assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb")) - - def test_release_with_assignment_flag(self, exchange, course_dir): - self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb")) - self._release("--assignment=ps1", exchange) - assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb")) - - def test_no_exchange(self, exchange, course_dir): - shutil.rmtree(exchange) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb")) - self._release("--assignment=ps1", exchange) - assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb")) - - def 
test_exchange_bad_perms(self, exchange, course_dir): - perms = stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR|stat.S_IRGRP - os.chmod(exchange, perms) - self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb")) - self._release("--assignment=ps1", exchange) - assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb")) - - @notwindows - @pytest.mark.parametrize("groupshared", [False, True]) - def test_permissions(self, exchange, course_dir, groupshared): - if groupshared: - with open("nbgrader_config.py", "a") as fh: - fh.write("""c.CourseDirectory.groupshared = True""") - self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb")) - self._release("--assignment=ps1", exchange) - assert self._get_permissions(join(exchange, "abc101", "outbound", "ps1")) == ("755" if not groupshared else "2775") - assert self._get_permissions(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb")) == ("644" if not groupshared else "664") diff --git a/nbgrader/tests/apps/test_nbgrader_releasefeedback.py b/nbgrader/tests/apps/test_nbgrader_releasefeedback.py deleted file mode 100644 index 88e72da8b..000000000 --- a/nbgrader/tests/apps/test_nbgrader_releasefeedback.py +++ /dev/null @@ -1,138 +0,0 @@ -import os -import sys -from os.path import join, exists, isfile -import pytest - -from ...utils import notebook_hash, make_unique_key -from .. import run_nbgrader -from .base import BaseTestApp -from .conftest import notwindows - - -class TestNbGraderReleaseFeedback(BaseTestApp): - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["release_feedback", "--help-all"]) - - def test_second_argument(self): - """Does passing a second argument raise an error?""" - run_nbgrader(["release_feedback", "ps1", "second_arg"], retcode=1) - - def test_no_argument(self): - """Does omitting the assignment argument raise an error?""" - run_nbgrader(["release_feedback"], retcode=1) - - @notwindows - def test_single_file(self, db, course_dir, exchange): - """Can feedback be released for a single notebook?""" - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", - "2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["assign", "ps1", "--db", db]) - nb_path = join(course_dir, "submitted", "foo", "ps1", "p1.ipynb") - self._copy_file(join("files", "submitted-unchanged.ipynb"), nb_path) - self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "foo", "ps1", "timestamp.txt")) - - run_nbgrader(["autograde", "ps1", "--db", db]) - run_nbgrader(["generate_feedback", "ps1", "--db", db]) - run_nbgrader(["release_feedback", "ps1", "--Exchange.root={}".format(exchange), '--course', 'abc101']) - unique_key = make_unique_key("abc101", "ps1", "p1", "foo", "2019-05-30 11:44:01.911849 UTC") - nb_hash = notebook_hash(nb_path, unique_key) - assert exists(join(exchange, "abc101", "feedback", "{}.html".format(nb_hash))) - # release feedback should overwrite without error - run_nbgrader(["release_feedback", "ps1", "--Exchange.root={}".format(exchange), '--course', 'abc101']) - - @notwindows - def test_single_student(self, db, course_dir, exchange): - """Can feedback be released for a single student?""" - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db, "--duedate", - 
"2015-02-02 14:58:23.948203 America/Los_Angeles"]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["assign", "ps1", "--db", db]) - nb_path = join(course_dir, "submitted", "foo", "ps1", "p1.ipynb") - self._copy_file(join("files", "submitted-unchanged.ipynb"), nb_path) - self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "foo", "ps1", "timestamp.txt")) - nb_path2 = join(course_dir, "submitted", "bar", "ps1", "p1.ipynb") - self._copy_file(join("files", "submitted-changed.ipynb"), nb_path2) - self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "bar", "ps1", "timestamp.txt")) - - run_nbgrader(["autograde", "ps1", "--db", db]) - run_nbgrader(["generate_feedback", "ps1", "--db", db]) - run_nbgrader(["release_feedback", "ps1", "--Exchange.root={}".format(exchange), '--course', 'abc101', '--student', 'foo']) - unique_key = make_unique_key("abc101", "ps1", "p1", "foo", "2019-05-30 11:44:01.911849 UTC") - nb_hash = notebook_hash(nb_path, unique_key) - assert exists(join(exchange, "abc101", "feedback", "{}.html".format(nb_hash))) - unique_key2 = make_unique_key("abc101", "ps1", "p1", "bar", "2019-05-30 11:44:01.911849 UTC") - nb_hash2 = notebook_hash(nb_path2, unique_key2) - assert not exists(join(exchange, "abc101", "feedback", "{}.html".format(nb_hash2))) - # release feedback should overwrite without error - run_nbgrader(["release_feedback", "ps1", "--Exchange.root={}".format(exchange), '--course', 'abc101']) - - @notwindows - def test_student_id_exclude(self, db, course_dir, exchange): - """Does --CourseDirectory.student_id_exclude=X exclude students?""" - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - run_nbgrader(["db", "student", "add", "baz", "--db", db]) - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["assign", "ps1", "--db", db]) - nb_path = join(course_dir, "submitted", "foo", "ps1", "p1.ipynb") - self._copy_file(join("files", "submitted-unchanged.ipynb"), nb_path) - self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "foo", "ps1", "timestamp.txt")) - nb_path2 = join(course_dir, "submitted", "bar", "ps1", "p1.ipynb") - self._copy_file(join("files", "submitted-changed.ipynb"), nb_path2) - self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "bar", "ps1", "timestamp.txt")) - - run_nbgrader(["autograde", "ps1", "--db", db]) - run_nbgrader(["generate_feedback", "ps1", "--db", db]) - run_nbgrader(["release_feedback", "ps1", "--Exchange.root={}".format(exchange), '--course', 'abc101', - "--CourseDirectory.student_id_exclude=bar,baz"]) # baz doesn't exist, test still OK though - unique_key = make_unique_key("abc101", "ps1", "p1", "foo", "2019-05-30 11:44:01.911849 UTC") - nb_hash = notebook_hash(nb_path, unique_key) # foo - assert exists(join(exchange, "abc101", "feedback", "{}.html".format(nb_hash))) - unique_key2 = make_unique_key("abc101", "ps1", "p1", "bar", "2019-05-30 11:44:01.911849 UTC") - nb_hash2 = notebook_hash(nb_path2, unique_key2) # bar - assert not exists(join(exchange, "abc101", "feedback", "{}.html".format(nb_hash2))) - # release feedback should overwrite without error - 
run_nbgrader(["release_feedback", "ps1", "--Exchange.root={}".format(exchange), '--course', 'abc101']) - - - @notwindows - @pytest.mark.parametrize("groupshared", [False, True]) - def test_permissions(self, db, course_dir, exchange, groupshared): - """Are permissions properly set?""" - run_nbgrader(["db", "assignment", "add", "ps1"]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - with open("nbgrader_config.py", "a") as fh: - if groupshared: - fh.write("""c.CourseDirectory.groupshared = True\n""") - self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["assign", "ps1", "--db", db]) - nb_path = join(course_dir, "submitted", "foo", "ps1", "p1.ipynb") - self._copy_file(join("files", "submitted-unchanged.ipynb"), nb_path) - self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "foo", "ps1", "timestamp.txt")) - unique_key = make_unique_key("abc101", "ps1", "p1", "foo", "2019-05-30 11:44:01.911849 UTC") - nb_hash = notebook_hash(nb_path, unique_key) - - self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "foo.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db]) - run_nbgrader(["generate_feedback", "ps1", "--db", db]) - run_nbgrader(["release_feedback", "ps1", "--Exchange.root={}".format(exchange), '--course', 'abc101']) - - if groupshared: - perms = '664' - dirperms = '2771' - else: - perms = '644' - dirperms = '711' - - feedback_dir = join(exchange, "abc101", "feedback") - assert self._get_permissions(feedback_dir) == dirperms - os.system("find %s -ls"%feedback_dir) - assert self._get_permissions(join(feedback_dir, nb_hash+".html")) == perms diff --git a/nbgrader/tests/apps/test_nbgrader_submit.py b/nbgrader/tests/apps/test_nbgrader_submit.py deleted file mode 100644 index c70a291be..000000000 --- a/nbgrader/tests/apps/test_nbgrader_submit.py +++ /dev/null @@ -1,251 +0,0 @@ -import os -import datetime -import time -import stat -import pytest - -from os.path import join, isfile, exists - -from ...utils import parse_utc, get_username -from .. 
import run_nbgrader -from .base import BaseTestApp -from .conftest import notwindows - - -@notwindows -class TestNbGraderSubmit(BaseTestApp): - - def _release(self, assignment, exchange, cache, course_dir, course="abc101"): - self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb")) - run_nbgrader([ - "release_assignment", assignment, - "--course", course, - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ]) - - def _fetch(self, assignment, exchange, cache, course="abc101", flags=None): - cmd = [ - "fetch_assignment", assignment, - "--course", course, - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ] - - if flags is not None: - cmd.extend(flags) - - run_nbgrader(cmd) - - def _release_and_fetch(self, assignment, exchange, cache, course_dir, course="abc101"): - self._release(assignment, exchange, cache, course_dir, course=course) - self._fetch(assignment, exchange, cache, course=course) - - def _submit(self, assignment, exchange, cache, flags=None, retcode=0, course="abc101"): - cmd = [ - "submit", assignment, - "--course", course, - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ] - - if flags is not None: - cmd.extend(flags) - - run_nbgrader(cmd, retcode=retcode) - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["submit", "--help-all"]) - - def test_no_course_id(self, exchange, cache, course_dir): - """Does submitting without a course id throw an error?""" - self._release_and_fetch("ps1", exchange, cache, course_dir) - cmd = [ - "submit", "ps1", - "--Exchange.cache={}".format(cache), - "--Exchange.root={}".format(exchange) - ] - run_nbgrader(cmd, retcode=1) - - def test_submit(self, exchange, cache, course_dir): - self._release_and_fetch("ps1", exchange, cache, course_dir) - now = datetime.datetime.utcnow() - - time.sleep(1) - self._submit("ps1", exchange, cache) - - filename, = os.listdir(join(exchange, "abc101", "inbound")) - username, assignment, timestamp1 = filename.split("+")[:3] - assert username == get_username() - assert assignment == "ps1" - assert parse_utc(timestamp1) > now - assert isfile(join(exchange, "abc101", "inbound", filename, "p1.ipynb")) - assert isfile(join(exchange, "abc101", "inbound", filename, "timestamp.txt")) - with open(join(exchange, "abc101", "inbound", filename, "timestamp.txt"), "r") as fh: - assert fh.read() == timestamp1 - - filename, = os.listdir(join(cache, "abc101")) - username, assignment, timestamp1 = filename.split("+")[:3] - assert username == get_username() - assert assignment == "ps1" - assert parse_utc(timestamp1) > now - assert isfile(join(cache, "abc101", filename, "p1.ipynb")) - assert isfile(join(cache, "abc101", filename, "timestamp.txt")) - with open(join(cache, "abc101", filename, "timestamp.txt"), "r") as fh: - assert fh.read() == timestamp1 - - time.sleep(1) - self._submit("ps1", exchange, cache) - - assert len(os.listdir(join(exchange, "abc101", "inbound"))) == 2 - filename = sorted(os.listdir(join(exchange, "abc101", "inbound")))[1] - username, assignment, timestamp2 = filename.split("+")[:3] - assert username == get_username() - assert assignment == "ps1" - assert parse_utc(timestamp2) > parse_utc(timestamp1) - assert isfile(join(exchange, "abc101", "inbound", filename, "p1.ipynb")) - assert isfile(join(exchange, "abc101", "inbound", filename, "timestamp.txt")) - with open(join(exchange, "abc101", "inbound", filename, "timestamp.txt"), "r") as fh: - assert fh.read() == 
timestamp2 - - assert len(os.listdir(join(cache, "abc101"))) == 2 - filename = sorted(os.listdir(join(cache, "abc101")))[1] - username, assignment, timestamp2 = filename.split("+")[:3] - assert username == get_username() - assert assignment == "ps1" - assert parse_utc(timestamp2) > parse_utc(timestamp1) - assert isfile(join(cache, "abc101", filename, "p1.ipynb")) - assert isfile(join(cache, "abc101", filename, "timestamp.txt")) - with open(join(cache, "abc101", filename, "timestamp.txt"), "r") as fh: - assert fh.read() == timestamp2 - - def test_submit_extra(self, exchange, cache, course_dir): - self._release_and_fetch("ps1", exchange, cache, course_dir) - self._copy_file(join("files", "test.ipynb"), join("ps1", "p2.ipynb")) - # Check that we don't fail on extra notebooks submitted without the strict flag - self._submit("ps1", exchange, cache) - - def test_submit_extra_strict(self, exchange, cache, course_dir): - self._release_and_fetch("ps1", exchange, cache, course_dir) - self._copy_file(join("files", "test.ipynb"), join("ps1", "p2.ipynb")) - # Check that we don't fail on extra notebooks submitted with the strict flag - self._submit("ps1", exchange, cache, flags=['--strict']) - - def test_submit_missing(self, exchange, cache, course_dir): - self._release_and_fetch("ps1", exchange, cache, course_dir) - self._move_file(join("ps1", "p1.ipynb"), join("ps1", "p2.ipynb")) - # Check that we don't fail on missing notebooks submitted without the strict flag - self._submit("ps1", exchange, cache) - - def test_submit_missing_strict(self, exchange, cache, course_dir): - self._release_and_fetch("ps1", exchange, cache, course_dir) - self._move_file(join("ps1", "p1.ipynb"), join("ps1", "p2.ipynb")) - # Check that we fail on missing notebooks submitted with the strict flag - self._submit("ps1", exchange, cache, flags=['--strict'], retcode=1) - - def test_submit_readonly(self, exchange, cache, course_dir): - self._release_and_fetch("ps1", exchange, cache, course_dir) - os.chmod(join("ps1", "p1.ipynb"), stat.S_IRUSR) - self._submit("ps1", exchange, cache) - - filename, = os.listdir(join(exchange, "abc101", "inbound")) - perms = os.stat(join(exchange, "abc101", "inbound", filename, "p1.ipynb")).st_mode - perms = str(oct(perms & (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)))[-3:] - assert int(perms[0]) >= 4 - assert int(perms[1]) == 4 - assert int(perms[2]) == 4 - - def test_submit_assignment_flag(self, exchange, cache, course_dir): - self._release_and_fetch("ps1", exchange, cache, course_dir) - self._submit("--assignment=ps1", exchange, cache) - - def test_submit_with_student_id(self, exchange, cache, course_dir): - self._release_and_fetch("ps1", exchange, cache, course_dir) - self._submit("ps1", exchange, cache, flags=["--student=foobar_student", ]) - filename, = os.listdir(join(cache, "abc101")) - username, assignment, timestamp1 = filename.split("+")[:3] - assert username == "foobar_student" - assert assignment == "ps1" - # '*' and '+' are forbidden - self._submit("ps1", exchange, cache, flags=["--student=foobar+student", ], retcode=1) - self._submit("ps1", exchange, cache, flags=["--student=foobar*student", ], retcode=1) - - def test_submit_multiple_courses(self, exchange, cache, course_dir): - self._release("ps1", exchange, cache, course_dir, course="abc101") - self._release("ps1", exchange, cache, course_dir, course="abc102") - self._fetch( - "ps1", exchange, cache, course="abc101", - flags=["--Exchange.path_includes_course=True"]) - self._fetch( - "ps1", exchange, cache, course="abc102", - flags=["--Exchange.path_includes_course=True"]) - - 
self._submit( - "ps1", exchange, cache, course="abc101", - flags=["--Exchange.path_includes_course=True"]) - - filename, = os.listdir(join(exchange, "abc101", "inbound")) - username, assignment, _ = filename.split("+")[:3] - assert username == get_username() - assert assignment == "ps1" - assert isfile(join(exchange, "abc101", "inbound", filename, "p1.ipynb")) - assert isfile(join(exchange, "abc101", "inbound", filename, "timestamp.txt")) - - filename, = os.listdir(join(cache, "abc101")) - username, assignment, _ = filename.split("+")[:3] - assert username == get_username() - assert assignment == "ps1" - assert isfile(join(cache, "abc101", filename, "p1.ipynb")) - assert isfile(join(cache, "abc101", filename, "timestamp.txt")) - - self._submit( - "ps1", exchange, cache, course="abc102", - flags=["--Exchange.path_includes_course=True"]) - - filename, = os.listdir(join(exchange, "abc102", "inbound")) - username, assignment, _ = filename.split("+")[:3] - assert username == get_username() - assert assignment == "ps1" - assert isfile(join(exchange, "abc102", "inbound", filename, "p1.ipynb")) - assert isfile(join(exchange, "abc102", "inbound", filename, "timestamp.txt")) - - filename, = os.listdir(join(cache, "abc102")) - username, assignment, _ = filename.split("+")[:3] - assert username == get_username() - assert assignment == "ps1" - assert isfile(join(cache, "abc102", filename, "p1.ipynb")) - assert isfile(join(cache, "abc102", filename, "timestamp.txt")) - - def test_submit_exclude(self, exchange, cache, course_dir): - self._release_and_fetch("ps1", exchange, cache, course_dir) - self._make_file(join("ps1", "foo.pyc")) - self._submit("ps1", exchange, cache) - filename, = os.listdir(join(exchange, "abc101", "inbound")) - assert not exists(join(exchange, "abc101", "inbound", filename, "foo.pyc")) - - def test_submit_include(self, exchange, cache, course_dir): - self._release_and_fetch("ps1", exchange, cache, course_dir) - self._make_file(join("ps1", "foo.txt")) - self._submit("ps1", exchange, cache, - flags=['--CourseDirectory.include=["*.ipynb"]']) - filename, = os.listdir(join(exchange, "abc101", "inbound")) - assert not exists(join(exchange, "abc101", "inbound", filename, "foo.txt")) - - def test_submit_max_file_size(self, exchange, cache, course_dir): - self._release_and_fetch("ps1", exchange, cache, course_dir) - self._make_file(join("ps1", "small_file"), contents="x" * 2000) - self._make_file(join("ps1", "large_file"), contents="x" * 2001) - self._submit("ps1", exchange, cache, - flags=['--CourseDirectory.max_file_size=2']) - filename, = os.listdir(join(exchange, "abc101", "inbound")) - assert exists(join(exchange, "abc101", "inbound", filename, "small_file")) - assert not exists(join(exchange, "abc101", "inbound", filename, "large_file")) - - def test_submit_max_dir_size(self, exchange, cache, course_dir): - self._release_and_fetch("ps1", exchange, cache, course_dir) - self._make_file(join("ps1", "small_file"), contents="x" * 2000) - self._make_file(join("ps1", "large_file"), contents="x" * 2001) - with pytest.raises(RuntimeError): - self._submit("ps1", exchange, cache, - flags=['--CourseDirectory.max_dir_size=3']) \ No newline at end of file diff --git a/nbgrader/tests/apps/test_nbgrader_update.py b/nbgrader/tests/apps/test_nbgrader_update.py deleted file mode 100644 index beb00100e..000000000 --- a/nbgrader/tests/apps/test_nbgrader_update.py +++ /dev/null @@ -1,114 +0,0 @@ -from os.path import join - -from .. 
import run_nbgrader -from .base import BaseTestApp - - -class TestNbGraderUpdate(BaseTestApp): - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["update", "--help-all"]) - - def test_no_args(self): - """Is there an error if no arguments are given?""" - run_nbgrader(["update"], retcode=1) - - def test_missing_file(self): - """Is there an error if the file doesn't exist?""" - run_nbgrader(["update", "foo"], retcode=1) - - def test_not_a_notebook(self): - """Are non-notebooks ignored?""" - with open("foo", "w") as fh: - fh.write("blah") - run_nbgrader(["update", "foo"]) - - def test_single_notebook_v0(self): - """Does it work with just a single notebook?""" - self._copy_file(join("files", "test-v0.ipynb"), "p1.ipynb") - run_nbgrader(["update", "p1.ipynb"]) - - def test_single_notebook_v1(self): - """Does it work with just a single notebook?""" - self._copy_file(join("files", "test-v1.ipynb"), "p1.ipynb") - run_nbgrader(["update", "p1.ipynb"]) - - def test_single_notebook_v2(self): - """Does it work with just a single notebook?""" - self._copy_file(join("files", "test-v2.ipynb"), "p2.ipynb") - run_nbgrader(["update", "p2.ipynb"]) - - def test_validate(self): - """Does turning validation on/off work correctly?""" - - # updating shouldn't work if we're validating, too - self._copy_file(join("files", "test-v0-invalid.ipynb"), "p1.ipynb") - run_nbgrader(["update", "p1.ipynb"], retcode=1) - - # updating should work, but then validation should fail - self._copy_file(join("files", "test-v0-invalid.ipynb"), "p1.ipynb") - run_nbgrader(["update", "p1.ipynb", "--UpdateApp.validate=False"]) - run_nbgrader(["validate", "p1.ipynb"], retcode=1) - - def test_validate_too_new(self): - """Does turning validation on/off work correctly when the schema is too new?""" - - # updating shouldn't work if we're validating, too - self._copy_file(join("files", "too-new.ipynb"), "p1.ipynb") - run_nbgrader(["update", "p1.ipynb"], retcode=1) - - # updating should work, but then validation should fail - self._copy_file(join("files", "too-new.ipynb"), "p1.ipynb") - run_nbgrader(["update", "p1.ipynb", "--UpdateApp.validate=False"]) - run_nbgrader(["validate", "p1.ipynb"], retcode=1) - - def test_update_assign(self, db, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - - self._copy_file(join("files", "test-v0.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db], retcode=1) - - # now update the metadata - run_nbgrader(["update", course_dir]) - - # now assign should succeed - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - def test_update_autograde(self, db, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - - self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db]) - - # autograde should fail on old metadata, too - self._copy_file(join("files", "test-v0.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db], retcode=1) - - # now update the metadata - run_nbgrader(["update", course_dir]) - - # now autograde should succeed - run_nbgrader(["autograde", "ps1", "--db", db]) - - def 
test_update_autograde_old_assign(self, db, course_dir): - run_nbgrader(["db", "assignment", "add", "ps1", "--db", db]) - run_nbgrader(["db", "student", "add", "foo", "--db", db]) - run_nbgrader(["db", "student", "add", "bar", "--db", db]) - - self._copy_file(join("files", "test-v0.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb")) - run_nbgrader(["generate_assignment", "ps1", "--db", db, "--CheckCellMetadata.enabled=False"]) - - # autograde should fail on old metadata, too - self._copy_file(join(course_dir, "release", "ps1", "p1.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb")) - run_nbgrader(["autograde", "ps1", "--db", db], retcode=1) - - # now update the metadata - run_nbgrader(["update", join(course_dir, "submitted")]) - - # now autograde should succeed - run_nbgrader(["autograde", "ps1", "--db", db]) diff --git a/nbgrader/tests/apps/test_nbgrader_validate.py b/nbgrader/tests/apps/test_nbgrader_validate.py deleted file mode 100644 index e5b10d294..000000000 --- a/nbgrader/tests/apps/test_nbgrader_validate.py +++ /dev/null @@ -1,189 +0,0 @@ -from os.path import join -from textwrap import dedent - -from .. import run_nbgrader -from .base import BaseTestApp - - -class TestNbGraderValidate(BaseTestApp): - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["validate", "--help-all"]) - - def test_validate_unchanged(self): - """Does the validation fail on an unchanged notebook?""" - self._copy_file(join("files", "submitted-unchanged.ipynb"), "submitted-unchanged.ipynb") - output = run_nbgrader(["validate", "submitted-unchanged.ipynb"], stdout=True) - assert ( - output.splitlines()[0] - == "VALIDATION FAILED ON 3 CELL(S)! If you submit your assignment as it is, you WILL NOT" - ) - - def test_validate_changed(self): - """Does the validation pass on a changed notebook?""" - self._copy_file(join("files", "submitted-changed.ipynb"), "submitted-changed.ipynb") - output = run_nbgrader(["validate", "submitted-changed.ipynb"], stdout=True) - assert output.strip() == "Success! Your notebook passes all the tests." - - def test_validate_subdir(self): - """Does validation pass when the notebook opens a relative file from a subdirectory?""" - self._copy_file(join("files", "open_relative_file.ipynb"), "my_subdir/open_relative_file.ipynb") - self._copy_file(join("files", "data.txt"), "my_subdir/data.txt") - output = run_nbgrader(["validate", "my_subdir/open_relative_file.ipynb"], stdout=True) - assert output.strip() == "Success! Your notebook passes all the tests." - - def test_validate_zero_points(self): - """Does validation correctly fail when a cell has zero points?""" - self._copy_file(join("files", "validation-zero-points.ipynb"), "validation-zero-points.ipynb") - output = run_nbgrader(["validate", "validation-zero-points.ipynb"], stdout=True) - assert ( - output.splitlines()[0] - == "VALIDATION FAILED ON 1 CELL(S)! If you submit your assignment as it is, you WILL NOT" - ) - - def test_invert_validate_unchanged(self): - """Does the inverted validation pass on an unchanged notebook?""" - self._copy_file(join("files", "submitted-unchanged.ipynb"), "submitted-unchanged.ipynb") - output = run_nbgrader(["validate", "submitted-unchanged.ipynb", "--invert"], stdout=True) - assert output.splitlines()[0] == "NOTEBOOK PASSED ON 1 CELL(S)!" 
- - def test_invert_validate_changed(self): - """Does the inverted validation fail on a changed notebook?""" - self._copy_file(join("files", "submitted-changed.ipynb"), "submitted-changed.ipynb") - output = run_nbgrader(["validate", "submitted-changed.ipynb", "--invert"], stdout=True) - assert output.splitlines()[0] == "NOTEBOOK PASSED ON 2 CELL(S)!" - - def test_grade_cell_changed(self): - """Does validation fail if a grade cell has changed?""" - self._copy_file(join("files", "submitted-grade-cell-changed.ipynb"), "submitted-grade-cell-changed.ipynb") - output = run_nbgrader(["validate", "submitted-grade-cell-changed.ipynb"], stdout=True) - assert ( - output.splitlines()[0] - == "THE CONTENTS OF 1 TEST CELL(S) HAVE CHANGED! This might mean that even though the tests" - ) - - def test_grade_cell_changed_ignore_checksums(self): - """Does validation pass if a grade cell has changed but we're ignoring checksums?""" - self._copy_file(join("files", "submitted-grade-cell-changed.ipynb"), "submitted-grade-cell-changed.ipynb") - output = run_nbgrader([ - "validate", "submitted-grade-cell-changed.ipynb", - "--Validator.ignore_checksums=True" - ], stdout=True) - assert output.splitlines()[0] == "Success! Your notebook passes all the tests." - - def test_invert_grade_cell_changed(self): - """Does validation fail if a grade cell has changed, even with --invert?""" - self._copy_file(join("files", "submitted-grade-cell-changed.ipynb"), "submitted-grade-cell-changed.ipynb") - output = run_nbgrader(["validate", "submitted-grade-cell-changed.ipynb", "--invert"], stdout=True) - assert ( - output.splitlines()[0] - == "THE CONTENTS OF 1 TEST CELL(S) HAVE CHANGED! This might mean that even though the tests" - ) - - def test_invert_grade_cell_changed_ignore_checksums(self): - """Does validation fail if a grade cell has changed with --invert while ignoring checksums?""" - self._copy_file(join("files", "submitted-grade-cell-changed.ipynb"), "submitted-grade-cell-changed.ipynb") - output = run_nbgrader([ - "validate", "submitted-grade-cell-changed.ipynb", - "--invert", - "--Validator.ignore_checksums=True" - ], stdout=True) - assert output.splitlines()[0] == "NOTEBOOK PASSED ON 2 CELL(S)!" - - def test_validate_unchanged_ignore_checksums(self): - """Does the validation fail on an unchanged notebook when ignoring checksums?""" - self._copy_file(join("files", "submitted-unchanged.ipynb"), "submitted-unchanged.ipynb") - output = run_nbgrader([ - "validate", "submitted-unchanged.ipynb", - "--Validator.ignore_checksums=True" - ], stdout=True) - assert ( - output.splitlines()[0] - == "VALIDATION FAILED ON 1 CELL(S)! If you submit your assignment as it is, you WILL NOT" - ) - - def test_locked_cell_changed(self): - """Does validation fail if a locked cell has changed?""" - self._copy_file(join("files", "submitted-locked-cell-changed.ipynb"), "submitted-locked-cell-changed.ipynb") - output = run_nbgrader(["validate", "submitted-locked-cell-changed.ipynb"], stdout=True) - assert ( - output.splitlines()[0] - == "THE CONTENTS OF 2 TEST CELL(S) HAVE CHANGED! 
This might mean that even though the tests" - ) - - def test_locked_cell_changed_ignore_checksums(self): - """Does validation pass if a locked cell has changed but we're ignoring checksums?""" - self._copy_file(join("files", "submitted-locked-cell-changed.ipynb"), "submitted-locked-cell-changed.ipynb") - output = run_nbgrader([ - "validate", "submitted-locked-cell-changed.ipynb", - "--Validator.ignore_checksums=True" - ], stdout=True) - assert ( - output.splitlines()[0] - == "VALIDATION FAILED ON 1 CELL(S)! If you submit your assignment as it is, you WILL NOT" - ) - - def test_invert_locked_cell_changed(self): - """Does validation fail if a locked cell has changed, even with --invert?""" - self._copy_file(join("files", "submitted-locked-cell-changed.ipynb"), "submitted-locked-cell-changed.ipynb") - output = run_nbgrader(["validate", "submitted-locked-cell-changed.ipynb", "--invert"], stdout=True) - assert ( - output.splitlines()[0] - == "THE CONTENTS OF 2 TEST CELL(S) HAVE CHANGED! This might mean that even though the tests" - ) - - def test_invert_locked_cell_changed_ignore_checksums(self): - """Does validation fail if a locked cell has changed with --invert while ignoring checksums?""" - self._copy_file(join("files", "submitted-locked-cell-changed.ipynb"), "submitted-locked-cell-changed.ipynb") - output = run_nbgrader([ - "validate", "submitted-locked-cell-changed.ipynb", - "--invert", - "--Validator.ignore_checksums=True" - ], stdout=True) - assert output.splitlines()[0] == "NOTEBOOK PASSED ON 1 CELL(S)!" - - def test_validate_glob(self): - """Does the validation work when we glob filenames?""" - self._copy_file(join("files", "submitted-unchanged.ipynb"), "nb1.ipynb") - self._copy_file(join("files", "submitted-changed.ipynb"), "nb2.ipynb") - self._copy_file(join("files", "submitted-changed.ipynb"), "nb3.ipynb") - run_nbgrader(["validate", "*.ipynb"]) - run_nbgrader(["validate", "nb1.ipynb", "nb2.ipynb"]) - run_nbgrader(["validate", "nb1.ipynb", "nb2.ipynb", "nb3.ipynb"]) - run_nbgrader(["validate"], retcode=1) - - def test_validate_with_validating_envvar(self, db, course_dir): - self._copy_file(join("files", "validating-environment-variable.ipynb"), "nb1.ipynb") - output = run_nbgrader(["validate", "nb1.ipynb"], stdout=True) - assert ( - output.splitlines()[0] - == "VALIDATION FAILED ON 1 CELL(S)! If you submit your assignment as it is, you WILL NOT" - ) - - def test_validate_timeout(self, db, course_dir): - """Does validate accept timeout configuration correctly?""" - self._copy_file(join("files", "timeout.ipynb"), "nb1.ipynb") - output = run_nbgrader(["validate", "nb1.ipynb"], stdout=True) - assert output.strip() == "Success! Your notebook passes all the tests." 
- - # timeout=1 leads to an asyncio error on Windows - output = run_nbgrader(["validate", "--Execute.timeout=2", "nb1.ipynb"], stdout=True) - assert output.splitlines()[-2].strip() == "CellTimeoutError: No reply from kernel before timeout" - - def test_validate_timeout_config(self, db, course_dir): - """Is the timeout error message configurable?""" - self._copy_file(join("files", "timeout.ipynb"), "nb1.ipynb") - # supplying a list as a dict value (for the traceback) on the command line is awkward, - # so it is easier to write this into a config file - self._make_file("nbgrader_config.py", - dedent(""" - c = get_config() - c.Execute.error_on_timeout = { - "ename": "CustomError", - "evalue": "", - "traceback": ["Custom"], - } - """)) - output = run_nbgrader(["validate", "--Execute.timeout=2", "nb1.ipynb"], stdout=True) - assert output.splitlines()[-2].strip() == "Custom" diff --git a/nbgrader/tests/apps/test_nbgrader_zip_collect.py b/nbgrader/tests/apps/test_nbgrader_zip_collect.py deleted file mode 100644 index 2db3156bc..000000000 --- a/nbgrader/tests/apps/test_nbgrader_zip_collect.py +++ /dev/null @@ -1,691 +0,0 @@ -# -*- coding: utf-8 -*- - -import io -import os -import pytest -import zipfile - -from textwrap import dedent -from os.path import join - -from .base import BaseTestApp -from .. import run_nbgrader -from ...utils import rmtree - - -@pytest.fixture -def archive_dir(request, course_dir): - path = os.path.join(course_dir, "downloaded", "ps1", "archive") - os.makedirs(path) - - def fin(): - rmtree(path) - request.addfinalizer(fin) - - return path - - -def _count_zip_files(path): - with zipfile.ZipFile(path, 'r') as zip_file: - return len(zip_file.namelist()) - - -class TestNbGraderZipCollect(BaseTestApp): - - def _make_notebook(self, dest, *args): - notebook = '{}_{}_attempt_{}_{}.ipynb'.format(*args) - self._empty_notebook(join(dest, notebook)) - - def test_help(self): - """Does the help display without error?""" - run_nbgrader(["zip_collect", "--help-all"]) - - def test_args(self): - # Should fail with no assignment id - run_nbgrader(["zip_collect"], retcode=1) - - def test_no_archive_dir(self, course_dir): - # Should not fail with no archive_directory - run_nbgrader(["zip_collect", "ps1"]) - - def test_empty_folders(self, course_dir, archive_dir): - os.makedirs(join(archive_dir, "..", "extracted")) - run_nbgrader(["zip_collect", "ps1"]) - assert not os.path.isdir(join(course_dir, "submitted")) - - def test_extract_single_notebook(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - - run_nbgrader(["zip_collect", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - - # Run again should fail - run_nbgrader(["zip_collect", "ps1"], retcode=1) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - - # Run again with --force flag should pass - run_nbgrader(["zip_collect", "--force", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - - def test_extract_single_notebook_f(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - - run_nbgrader(["zip_collect", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - - # Run again should fail - run_nbgrader(["zip_collect", "ps1"], retcode=1) - assert os.path.isdir(extracted_dir) 
- assert len(os.listdir(extracted_dir)) == 1 - - # Run again with --force flag should pass - run_nbgrader(["zip_collect", "-f", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - - def test_extract_sub_dir_single_notebook(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - self._make_notebook(join(archive_dir, 'hacker'), - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - - run_nbgrader(["zip_collect", "ps1"]) - assert os.path.isdir(extracted_dir) - assert os.path.isdir(join(extracted_dir, "hacker")) - assert len(os.listdir(join(extracted_dir, "hacker"))) == 1 - - def test_extract_archive(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted", "notebooks") - archive = join(archive_dir, "notebooks.zip") - self._copy_file(join("files", "notebooks.zip"), archive) - - run_nbgrader(["zip_collect", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == _count_zip_files(archive) - - def test_extract_archive_copies(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - archive1 = join(archive_dir, "notebooks.zip") - archive2 = join(archive_dir, "notebooks_copy.zip") - - self._copy_file(join("files", "notebooks.zip"), archive1) - self._copy_file(join("files", "notebooks.zip"), archive2) - - cnt = 0 - run_nbgrader(["zip_collect", "ps1"]) - nfiles = _count_zip_files(archive1) + _count_zip_files(archive2) - assert os.path.isdir(extracted_dir) - for _, _, files in os.walk(extracted_dir): - cnt += len(files) - assert cnt == nfiles - - def test_collect_no_regexp(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - - run_nbgrader(["zip_collect", "--force", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - assert not os.path.isdir(submitted_dir) - - def test_collect_bad_regexp(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - - with open("nbgrader_config.py", "a") as fh: - fh.write(dedent( - """ - c.FileNameCollectorPlugin.named_regexp = ( - r"Peter piper picked ..." 
- ) - """ - )) - - run_nbgrader(["zip_collect", "--force", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - assert not os.path.isdir(submitted_dir) - - def test_collect_regexp_missing_student_id(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - - with open("nbgrader_config.py", "a") as fh: - fh.write(dedent( - """ - c.FileNameCollectorPlugin.named_regexp = ( - r".+_(?P<student>\w+)_attempt_(?P<timestamp>[0-9\-]+)_(?P<file_id>\w+)" - ) - """ - )) - - run_nbgrader(["zip_collect", "ps1"], retcode=1) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - assert not os.path.isdir(submitted_dir) - - def test_collect_regexp_bad_student_id_type(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - - with open('plugin_one.py', 'w') as fh: - fh.write(dedent( - """ - from nbgrader.plugins import FileNameCollectorPlugin - - class CustomPlugin(FileNameCollectorPlugin): - def collect(self, submitted_file): - info = super(CustomPlugin, self).collect(submitted_file) - if info is not None: - info['student_id'] = 111 - return info - """ - )) - - with open("nbgrader_config.py", "a") as fh: - fh.write(dedent( - """ - c.ZipCollectApp.collector_plugin = 'plugin_one.CustomPlugin' - c.FileNameCollectorPlugin.named_regexp = ( - r".+_(?P<student_id>\w+)_attempt_(?P<timestamp>[0-9\-]+)_(?P<file_id>\w+)" - ) - """ - )) - - run_nbgrader(["zip_collect", "ps1"], retcode=1) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - assert not os.path.isdir(submitted_dir) - - def test_collect_single_notebook(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - - with open("nbgrader_config.py", "a") as fh: - fh.write(dedent( - """ - c.FileNameCollectorPlugin.named_regexp = ( - r".+_(?P<student_id>\w+)_attempt_(?P<timestamp>[0-9\-]+)_(?P<file_id>\w+)" - ) - """ - )) - - run_nbgrader(["zip_collect", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - - assert os.path.isdir(submitted_dir) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'problem1.ipynb')) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'timestamp.txt')) - assert len(os.listdir(join(submitted_dir, "hacker", "ps1"))) == 2 - - def test_collect_single_notebook_attempts(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") - - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-40-10', 'problem1') - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-50-10', 'problem1') - - with open('plugin_two.py', 'w') as fh: - fh.write(dedent( - """ - from nbgrader.plugins import FileNameCollectorPlugin - - class CustomPlugin(FileNameCollectorPlugin): - def collect(self, submitted_file): - info = super(CustomPlugin, self).collect(submitted_file) - if info is not None: - info['timestamp'] = '{}-{}-{} {}:{}:{}'.format( - *tuple(info['timestamp'].split('-')) - ) - return info - """ - )) - - with 
open("nbgrader_config.py", "a") as fh: - fh.write(dedent( - """ - c.ZipCollectApp.collector_plugin = 'plugin_two.CustomPlugin' - c.FileNameCollectorPlugin.named_regexp = ( - r".+_(?P<student_id>\w+)_attempt_(?P<timestamp>[0-9\-]+)_(?P<file_id>\w+)" - ) - """ - )) - - run_nbgrader(["zip_collect", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 3 - - assert os.path.isdir(submitted_dir) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'problem1.ipynb')) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'timestamp.txt')) - assert len(os.listdir(join(submitted_dir, "hacker", "ps1"))) == 2 - - with open(join(submitted_dir, "hacker", "ps1", 'timestamp.txt')) as ts: - timestamp = ts.read() - assert timestamp == '2016-01-30 15:50:10' - - def test_collect_multiple_notebooks(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem2') - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-02-10-15-30-10', 'problem1') - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-02-10-15-30-10', 'problem2') - - with open("nbgrader_config.py", "a") as fh: - fh.write(dedent( - """ - c.FileNameCollectorPlugin.named_regexp = ( - r".+_(?P<student_id>\w+)_attempt_(?P<timestamp>[0-9\-]+)_(?P<file_id>\w+)" - ) - """ - )) - - output = run_nbgrader(["zip_collect", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 4 - - assert os.path.isdir(submitted_dir) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'problem1.ipynb')) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'problem2.ipynb')) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'timestamp.txt')) - assert len(os.listdir(join(submitted_dir, "hacker", "ps1"))) == 3 - - # Issue #724 - check multiple attempts are collected properly - assert "Skipped submission file" not in output - msg = "Replacing previously collected submission file" - assert sum([msg in line for line in output.splitlines()]) == 2 - - def test_collect_sub_dir_single_notebook(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") - - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - self._make_notebook(join(archive_dir, 'bitdiddle'), - 'ps1', 'bitdiddle', '2016-01-30-15-30-10', 'problem1') - - with open("nbgrader_config.py", "a") as fh: - fh.write(dedent( - """ - c.FileNameCollectorPlugin.named_regexp = ( - r".+_(?P<student_id>\w+)_attempt_(?P<timestamp>[0-9\-]+)_(?P<file_id>\w+)" - ) - """ - )) - - run_nbgrader(["zip_collect", "ps1"]) - assert os.path.isdir(extracted_dir) - assert os.path.isdir(submitted_dir) - assert len(os.listdir(submitted_dir)) == 2 - - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'problem1.ipynb')) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'timestamp.txt')) - assert len(os.listdir(join(submitted_dir, "hacker", "ps1"))) == 2 - - assert os.path.isfile(join(submitted_dir, "bitdiddle", "ps1", 'problem1.ipynb')) - assert os.path.isfile(join(submitted_dir, "bitdiddle", "ps1", 'timestamp.txt')) - assert len(os.listdir(join(submitted_dir, "bitdiddle", "ps1"))) == 2 - - def test_collect_invalid_notebook(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") 
- self._empty_notebook(join(course_dir, 'source', 'ps1', 'problem1.ipynb')) - - run_nbgrader(["db", "assignment", "add", "ps1"]) - with open("nbgrader_config.py", "a") as fh: - fh.write(dedent( - """ - c.FileNameCollectorPlugin.named_regexp = ( - r".+_(?P<student_id>\w+)_attempt_(?P<timestamp>[0-9\-]+)_(?P<file_id>\w+)" - ) - """ - )) - - run_nbgrader(["generate_assignment", "ps1"]) - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'myproblem1') - - # Should get collected without --strict flag - run_nbgrader(["zip_collect", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - - assert os.path.isdir(submitted_dir) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'myproblem1.ipynb')) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'timestamp.txt')) - assert len(os.listdir(join(submitted_dir, "hacker", "ps1"))) == 2 - - # Re-run with --strict flag - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - - run_nbgrader(["zip_collect", "--force", "--strict", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 2 - - assert os.path.isdir(submitted_dir) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'problem1.ipynb')) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'timestamp.txt')) - assert len(os.listdir(join(submitted_dir, "hacker", "ps1"))) == 2 - - def test_collect_timestamp_none(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - - with open("nbgrader_config.py", "a") as fh: - fh.write(dedent( - """ - c.FileNameCollectorPlugin.named_regexp = ( - r".+_(?P<student_id>\w+)_attempt_(?P<timestamp>[0-9\-]+)_(?P<file_id>\w+)" - ) - """ - )) - - run_nbgrader(["zip_collect", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - - assert os.path.isdir(submitted_dir) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'problem1.ipynb')) - assert not os.path.isfile(join(submitted_dir, "hacker", "ps1", 'timestamp.txt')) - assert len(os.listdir(join(submitted_dir, "hacker", "ps1"))) == 1 - - def test_collect_timestamp_empty_str(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - - with open('plugin_three.py', 'w') as fh: - fh.write(dedent( - """ - from nbgrader.plugins import FileNameCollectorPlugin - - class CustomPlugin(FileNameCollectorPlugin): - def collect(self, submitted_file): - info = super(CustomPlugin, self).collect(submitted_file) - if info is not None: - info['timestamp'] = "" - return info - """ - )) - - with open("nbgrader_config.py", "a") as fh: - fh.write(dedent( - """ - c.ZipCollectApp.collector_plugin = 'plugin_three.CustomPlugin' - c.FileNameCollectorPlugin.named_regexp = ( - r".+_(?P<student_id>\w+)_attempt_(?P<timestamp>[0-9\-]+)_(?P<file_id>\w+)" - ) - """ - )) - - run_nbgrader(["zip_collect", "ps1"]) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - - assert os.path.isdir(submitted_dir) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'problem1.ipynb')) - assert not os.path.isfile(join(submitted_dir, "hacker", "ps1", 'timestamp.txt')) - assert len(os.listdir(join(submitted_dir, "hacker", "ps1"))) == 1 - - def test_collect_timestamp_bad_str(self, 
course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") - self._make_notebook(archive_dir, - 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1') - - with open('plugin_four.py', 'w') as fh: - fh.write(dedent( - """ - from nbgrader.plugins import FileNameCollectorPlugin - - class CustomPlugin(FileNameCollectorPlugin): - def collect(self, submitted_file): - info = super(CustomPlugin, self).collect(submitted_file) - if info is not None: - info['timestamp'] = "I'm still trying to be a timestamp str" - return info - """ - )) - - with open("nbgrader_config.py", "a") as fh: - fh.write(dedent( - """ - c.ZipCollectApp.collector_plugin = 'plugin_four.CustomPlugin' - c.FileNameCollectorPlugin.named_regexp = ( - r".+_(?P<student_id>\w+)_attempt_(?P<timestamp>[0-9\-]+)_(?P<file_id>\w+)" - ) - """ - )) - - run_nbgrader(["zip_collect", "ps1"], retcode=1) - assert os.path.isdir(extracted_dir) - assert len(os.listdir(extracted_dir)) == 1 - assert not os.path.isdir(submitted_dir) - - def test_collect_timestamp_skip_older(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") - - # submissions are sorted so a before b - os.makedirs(join(archive_dir, 'ps1_hacker_a_2017-01-30-15-30-10')) - with io.open(join(archive_dir, 'ps1_hacker_a_2017-01-30-15-30-10', 'problem1.ipynb'), mode='w', encoding='utf-8') as fh: - fh.write(u'') - os.makedirs(join(archive_dir, 'ps1_hacker_b_2016-01-30-15-30-10')) - with io.open(join(archive_dir, 'ps1_hacker_b_2016-01-30-15-30-10', 'problem1.ipynb'), mode='w', encoding='utf-8') as fh: - fh.write(u'') - - with open('plugin_five.py', 'w') as fh: - fh.write(dedent( - """ - from nbgrader.plugins import FileNameCollectorPlugin - - class CustomPlugin(FileNameCollectorPlugin): - def collect(self, submitted_file): - info = super(CustomPlugin, self).collect(submitted_file) - if info is not None: - info['timestamp'] = '{}-{}-{} {}:{}:{}'.format( - *tuple(info['timestamp'].split('-')) - ) - return info - """ - )) - - with open("nbgrader_config.py", "a") as fh: - fh.write(dedent( - """ - c.ZipCollectApp.collector_plugin = 'plugin_five.CustomPlugin' - c.FileNameCollectorPlugin.valid_ext = ['.ipynb', '.txt'] - c.FileNameCollectorPlugin.named_regexp = ( - r".+ps1_(?P<student_id>\w+)_[a|b]_(?P<timestamp>[0-9\-]+)\W+(?P<file_id>.+)" - ) - """ - )) - - run_nbgrader(["zip_collect", "ps1"]) - assert os.path.isdir(extracted_dir) - assert os.path.isdir(submitted_dir) - assert len(os.listdir(submitted_dir)) == 1 - - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'problem1.ipynb')) - assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'timestamp.txt')) - assert len(os.listdir(join(submitted_dir, "hacker", "ps1"))) == 2 - - with open(join(submitted_dir, "hacker", "ps1", 'timestamp.txt'), 'r') as fh: - ts = fh.read() - assert ts == '2017-01-30 15:30:10' - - def test_collect_timestamp_replace_newer(self, course_dir, archive_dir): - extracted_dir = join(archive_dir, "..", "extracted") - submitted_dir = join(course_dir, "submitted") - - # submissions are sorted so a before b - os.makedirs(join(archive_dir, 'ps1_hacker_a_2016-01-30-15-30-10')) - with io.open(join(archive_dir, 'ps1_hacker_a_2016-01-30-15-30-10', 'problem1.ipynb'), mode='w', encoding='utf-8') as fh: - fh.write(u'') - os.makedirs(join(archive_dir, 'ps1_hacker_b_2017-01-30-15-30-10')) - with io.open(join(archive_dir, 'ps1_hacker_b_2017-01-30-15-30-10', 'problem1.ipynb'), mode='w', encoding='utf-8') as fh: - fh.write(u'') - - with 
-            fh.write(dedent(
-                """
-                from nbgrader.plugins import FileNameCollectorPlugin
-
-                class CustomPlugin(FileNameCollectorPlugin):
-                    def collect(self, submitted_file):
-                        info = super(CustomPlugin, self).collect(submitted_file)
-                        if info is not None:
-                            info['timestamp'] = '{}-{}-{} {}:{}:{}'.format(
-                                *tuple(info['timestamp'].split('-'))
-                            )
-                        return info
-                """
-            ))
-
-        with open("nbgrader_config.py", "a") as fh:
-            fh.write(dedent(
-                """
-                c.ZipCollectApp.collector_plugin = 'plugin_six.CustomPlugin'
-                c.FileNameCollectorPlugin.valid_ext = ['.ipynb', '.txt']
-                c.FileNameCollectorPlugin.named_regexp = (
-                    r".+ps1_(?P<student_id>\w+)_[a|b]_(?P<timestamp>[0-9\-]+)\W+(?P<file_id>.+)"
-                )
-                """
-            ))
-
-        run_nbgrader(["zip_collect", "ps1"])
-        assert os.path.isdir(extracted_dir)
-        assert os.path.isdir(submitted_dir)
-        assert len(os.listdir(submitted_dir)) == 1
-
-        assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'problem1.ipynb'))
-        assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'timestamp.txt'))
-        assert len(os.listdir(join(submitted_dir, "hacker", "ps1"))) == 2
-
-        with open(join(submitted_dir, "hacker", "ps1", 'timestamp.txt'), 'r') as fh:
-            ts = fh.read()
-        assert ts == '2017-01-30 15:30:10'
-
-    def test_collect_timestamp_file(self, course_dir, archive_dir):
-        extracted_dir = join(archive_dir, "..", "extracted")
-        submitted_dir = join(course_dir, "submitted")
-
-        os.makedirs(join(archive_dir, 'ps1_hacker'))
-        with io.open(join(archive_dir, 'ps1_hacker', 'problem1.ipynb'), mode='w', encoding='utf-8') as fh:
-            fh.write(u'')
-        with open(join(archive_dir, 'ps1_hacker', 'timestamp.txt'), 'w') as fh:
-            fh.write('foo')
-
-        with open("nbgrader_config.py", "a") as fh:
-            fh.write(dedent(
-                """
-                c.FileNameCollectorPlugin.valid_ext = ['.ipynb', '.txt']
-                c.FileNameCollectorPlugin.named_regexp = (
-                    r".+ps1_(?P<student_id>\w+)\W+(?P<file_id>.+)"
-                )
-                """
-            ))
-
-        run_nbgrader(["zip_collect", "ps1"])
-        assert os.path.isdir(extracted_dir)
-        assert os.path.isdir(submitted_dir)
-        assert len(os.listdir(submitted_dir)) == 1
-
-        assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'problem1.ipynb'))
-        assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'timestamp.txt'))
-        assert len(os.listdir(join(submitted_dir, "hacker", "ps1"))) == 2
-
-        with open(join(submitted_dir, "hacker", "ps1", 'timestamp.txt'), 'r') as fh:
-            ts = fh.read()
-        assert ts == 'foo'
-
-    def test_collect_preserve_sub_dir(self, course_dir, archive_dir):
-        extracted_dir = join(archive_dir, "..", "extracted")
-        submitted_dir = join(course_dir, "submitted")
-
-        os.makedirs(join(archive_dir, 'ps1_hacker', 'files'))
-        with io.open(join(archive_dir, 'ps1_hacker', 'files', 'problem1.ipynb'), mode='w', encoding='utf-8') as fh:
-            fh.write(u'')
-        with open(join(archive_dir, 'ps1_hacker', 'timestamp.txt'), 'w') as fh:
-            fh.write('foo')
-
-        with open("nbgrader_config.py", "a") as fh:
-            fh.write(dedent(
-                """
-                c.FileNameCollectorPlugin.valid_ext = ['.ipynb', '.txt']
-                c.FileNameCollectorPlugin.named_regexp = (
-                    r".+ps1_(?P<student_id>\w+)\W+(?P<file_id>.+)"
-                )
-                """
-            ))
-
-        run_nbgrader(["zip_collect", "ps1"])
-        assert os.path.isdir(extracted_dir)
-        assert os.path.isdir(submitted_dir)
-        assert len(os.listdir(submitted_dir)) == 1
-
-        assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'files', 'problem1.ipynb'))
-        assert os.path.isfile(join(submitted_dir, "hacker", "ps1", 'timestamp.txt'))
-        assert len(os.listdir(join(submitted_dir, "hacker", "ps1"))) == 2
-
-        with open(join(submitted_dir, "hacker", "ps1", 'timestamp.txt'), 'r') as fh:
-            ts = fh.read()
-        assert ts == 'foo'
-
-    def test_collect_duplicate_fail(self, course_dir, archive_dir):
-        extracted_dir = join(archive_dir, "..", "extracted")
-        submitted_dir = join(course_dir, "submitted")
-
-        os.makedirs(join(archive_dir, 'ps1_hacker_01', 'files'))
-        with io.open(join(archive_dir, 'ps1_hacker_01', 'files', 'problem1.ipynb'), mode='w', encoding='utf-8') as fh:
-            fh.write(u'')
-
-        os.makedirs(join(archive_dir, 'ps1_hacker_02', 'files'))
-        with io.open(join(archive_dir, 'ps1_hacker_02', 'files', 'problem1.ipynb'), mode='w', encoding='utf-8') as fh:
-            fh.write(u'')
-
-        with open("nbgrader_config.py", "a") as fh:
-            fh.write(dedent(
-                """
-                c.FileNameCollectorPlugin.valid_ext = ['.ipynb', '.txt']
-                c.FileNameCollectorPlugin.named_regexp = (
-                    r".+ps1_(?P<student_id>\w+)_[0-9]+\W+(?P<file_id>.+)"
-                )
-                """
-            ))
-
-        run_nbgrader(["zip_collect", "ps1"], retcode=1)
-        assert os.path.isdir(extracted_dir)
-        assert len(os.listdir(extracted_dir)) == 2
-        assert not os.path.isdir(submitted_dir)
diff --git a/nbgrader/tests/nbgraderformat/__init__.py b/nbgrader/tests/nbgraderformat/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/nbgrader/tests/nbgraderformat/test_v1.py b/nbgrader/tests/nbgraderformat/test_v1.py
deleted file mode 100644
index 2cdef8417..000000000
--- a/nbgrader/tests/nbgraderformat/test_v1.py
+++ /dev/null
@@ -1,215 +0,0 @@
-import os
-import pytest
-import tempfile
-from nbformat import current_nbformat, read
-from nbformat.v4 import new_notebook
-from ...nbgraderformat.common import SchemaMismatchError, ValidationError
-from ...nbgraderformat.v1 import (
-    MetadataValidatorV1, read_v1, reads_v1, write_v1, writes_v1)
-from .. import (
-    create_code_cell,
-    create_grade_cell,
-    create_solution_cell,
-    create_regular_cell)
-
-
-def test_set_false():
-    cell = create_grade_cell("", "code", "foo", 2, 0)
-    del cell.metadata.nbgrader["solution"]
-    del cell.metadata.nbgrader["locked"]
-
-    MetadataValidatorV1().upgrade_cell_metadata(cell)
-    assert not cell.metadata.nbgrader["solution"]
-    assert not cell.metadata.nbgrader["locked"]
-
-    cell = create_solution_cell("", "code", "foo", 0)
-    del cell.metadata.nbgrader["grade"]
-    del cell.metadata.nbgrader["locked"]
-
-    MetadataValidatorV1().upgrade_cell_metadata(cell)
-    assert not cell.metadata.nbgrader["grade"]
-    assert not cell.metadata.nbgrader["locked"]
-
-
-def test_remove_metadata():
-    cell = create_solution_cell("", "code", "foo", 0)
-    cell.metadata.nbgrader["solution"] = False
-
-    MetadataValidatorV1().upgrade_cell_metadata(cell)
-    assert "nbgrader" not in cell.metadata
-
-
-def test_remove_points():
-    cell = create_solution_cell("", "code", "foo", 0)
-    cell.metadata.nbgrader["points"] = 2
-
-    MetadataValidatorV1().upgrade_cell_metadata(cell)
-    assert "points" not in cell.metadata.nbgrader
-
-
-def test_set_points():
-    cell = create_grade_cell("", "code", "foo", "", 0)
-    MetadataValidatorV1().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader["points"] == 0.0
-
-    cell = create_grade_cell("", "code", "foo", "1.5", 0)
-    MetadataValidatorV1().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader["points"] == 1.5
-
-    cell = create_grade_cell("", "code", "foo", 1, 0)
-    del cell.metadata.nbgrader["points"]
-    MetadataValidatorV1().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader["points"] == 0.0
-
-    cell = create_grade_cell("", "code", "foo", -1, 0)
-    MetadataValidatorV1().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader["points"] == 0.0
-
-
-def test_extra_keys():
-    cell = create_grade_cell("", "code", "foo", "", 0)
"foo", "", 0) - cell.metadata.nbgrader["foo"] = "bar" - MetadataValidatorV1().upgrade_cell_metadata(cell) - assert "foo" not in cell.metadata.nbgrader - - -def test_schema_version(): - cell = create_grade_cell("", "code", "foo", "", 0) - del cell.metadata.nbgrader["schema_version"] - MetadataValidatorV1().upgrade_cell_metadata(cell) - assert cell.metadata.nbgrader["schema_version"] == 1 - - -def test_read(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test-v1.ipynb") - read_v1(path, current_nbformat) - - -def test_reads(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test-v1.ipynb") - contents = open(path, "r").read() - reads_v1(contents, current_nbformat) - - -def test_write(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test-v1.ipynb") - nb = read_v1(path, current_nbformat) - with tempfile.TemporaryFile(mode="w") as fh: - write_v1(nb, fh) - - -def test_writes(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test-v1.ipynb") - nb = read_v1(path, current_nbformat) - writes_v1(nb) - - -def test_too_old(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test-v0.ipynb") - with pytest.raises(SchemaMismatchError): - read_v1(path, current_nbformat) - - -def test_too_new(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test.ipynb") - with pytest.raises(SchemaMismatchError): - read_v1(path, current_nbformat) - - -def test_upgrade_notebook_metadata(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test-v0.ipynb") - with open(path, "r") as fh: - nb = read(fh, current_nbformat) - nb = MetadataValidatorV1().upgrade_notebook_metadata(nb) - - -def test_upgrade_cell_metadata(): - cell = create_grade_cell("", "code", "foo", 5, 0) - MetadataValidatorV1().upgrade_cell_metadata(cell) - - cell = create_grade_cell("", "code", "foo", 5, 1) - MetadataValidatorV1().upgrade_cell_metadata(cell) - - cell = create_grade_cell("", "code", "foo", 5, 2) - MetadataValidatorV1().upgrade_cell_metadata(cell) - - -def test_regular_cells(): - validator = MetadataValidatorV1() - - # code cell without nbgrader metadata - cell = create_code_cell() - validator.validate_cell(cell) - validator.upgrade_cell_metadata(cell) - - # code cell with metadata, but not an nbgrader cell - cell = create_regular_cell("", "code", schema_version=1) - del cell.metadata.nbgrader["task"] - validator.validate_cell(cell) - - nb = new_notebook() - cell1 = create_code_cell() - cell2 = create_regular_cell("", "code", schema_version=1) - del cell2.metadata.nbgrader["task"] - nb.cells = [cell1, cell2] - validator.validate_nb(nb) - - -def test_invalid_metadata(): - validator = MetadataValidatorV1() - - # make sure the default cell works ok - cell = create_grade_cell("", "code", "foo", 5, 1) - del cell.metadata.nbgrader["task"] - validator.validate_cell(cell) - - # missing grade_id - cell = create_grade_cell("", "code", "foo", 5, 1) - del cell.metadata.nbgrader["task"] - del cell.metadata.nbgrader["grade_id"] - with pytest.raises(ValidationError): - validator.validate_cell(cell) - - # grade_id is empty - cell = create_grade_cell("", "code", "", 5, 1) - del cell.metadata.nbgrader["task"] - with pytest.raises(ValidationError): - validator.validate_cell(cell) - - # missing points - cell = create_grade_cell("", "code", "foo", 5, 1) 
-    del cell.metadata.nbgrader["task"]
-    del cell.metadata.nbgrader["points"]
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-    # markdown grade cell not marked as a solution cell
-    cell = create_grade_cell("", "markdown", "foo", 5, 1)
-    del cell.metadata.nbgrader["task"]
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-    # markdown solution cell not marked as a grade cell
-    cell = create_solution_cell("", "markdown", "foo", 1)
-    del cell.metadata.nbgrader["task"]
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-
-def test_duplicate_cells():
-    validator = MetadataValidatorV1()
-    nb = new_notebook()
-    cell1 = create_grade_cell("", "code", "foo", 5, 1)
-    del cell1.metadata.nbgrader["task"]
-    cell2 = create_grade_cell("", "code", "foo", 5, 1)
-    del cell2.metadata.nbgrader["task"]
-    nb.cells = [cell1, cell2]
-    with pytest.raises(ValidationError):
-        validator.validate_nb(nb)
diff --git a/nbgrader/tests/nbgraderformat/test_v2.py b/nbgrader/tests/nbgraderformat/test_v2.py
deleted file mode 100644
index 21b13174b..000000000
--- a/nbgrader/tests/nbgraderformat/test_v2.py
+++ /dev/null
@@ -1,257 +0,0 @@
-import os
-import pytest
-import tempfile
-from nbformat import current_nbformat, read
-from nbformat.v4 import new_notebook
-from ...nbgraderformat.common import SchemaMismatchError, ValidationError
-from ...nbgraderformat.v2 import (
-    MetadataValidatorV2, read_v2, reads_v2, write_v2, writes_v2)
-from .. import (
-    create_code_cell,
-    create_grade_cell,
-    create_solution_cell,
-    create_regular_cell)
-
-
-def test_set_false():
-    cell = create_grade_cell("", "code", "foo", 2, 0)
-    del cell.metadata.nbgrader["solution"]
-    del cell.metadata.nbgrader["locked"]
-
-    MetadataValidatorV2().upgrade_cell_metadata(cell)
-    assert not cell.metadata.nbgrader["solution"]
-    assert not cell.metadata.nbgrader["locked"]
-
-    cell = create_solution_cell("", "code", "foo", 0)
-    del cell.metadata.nbgrader["grade"]
-    del cell.metadata.nbgrader["locked"]
-
-    MetadataValidatorV2().upgrade_cell_metadata(cell)
-    assert not cell.metadata.nbgrader["grade"]
-    assert not cell.metadata.nbgrader["locked"]
-
-
-def test_remove_metadata():
-    cell = create_solution_cell("", "code", "foo", 0)
-    cell.metadata.nbgrader["solution"] = False
-
-    MetadataValidatorV2().upgrade_cell_metadata(cell)
-    assert "nbgrader" not in cell.metadata
-
-
-def test_remove_points():
-    cell = create_solution_cell("", "code", "foo", 0)
-    cell.metadata.nbgrader["points"] = 2
-
-    MetadataValidatorV2().upgrade_cell_metadata(cell)
-    assert "points" not in cell.metadata.nbgrader
-
-
-def test_set_points():
-    cell = create_grade_cell("", "code", "foo", "", 0)
-    MetadataValidatorV2().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader["points"] == 0.0
-
-    cell = create_grade_cell("", "code", "foo", "1.5", 0)
-    MetadataValidatorV2().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader["points"] == 1.5
-
-    cell = create_grade_cell("", "code", "foo", 1, 0)
-    del cell.metadata.nbgrader["points"]
-    MetadataValidatorV2().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader["points"] == 0.0
-
-    cell = create_grade_cell("", "code", "foo", -1, 0)
-    MetadataValidatorV2().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader["points"] == 0.0
-
-
-def test_extra_keys():
-    cell = create_grade_cell("", "code", "foo", "", 0)
-    cell.metadata.nbgrader["foo"] = "bar"
-    MetadataValidatorV2().upgrade_cell_metadata(cell)
-    assert "foo" not in cell.metadata.nbgrader
-
-    cell = create_grade_cell("", "code", "foo", "", 1)
create_grade_cell("", "code", "foo", "", 1) - cell.metadata.nbgrader["foo"] = "bar" - MetadataValidatorV2().upgrade_cell_metadata(cell) - assert "foo" not in cell.metadata.nbgrader - - -def test_schema_version(): - cell = create_grade_cell("", "code", "foo", "", 0) - del cell.metadata.nbgrader["schema_version"] - MetadataValidatorV2().upgrade_cell_metadata(cell) - assert cell.metadata.nbgrader["schema_version"] == 2 - - -def test_cell_type(): - cell = create_grade_cell("", "code", "foo", "", 0) - MetadataValidatorV2().upgrade_cell_metadata(cell) - assert "cell_type" not in cell.metadata.nbgrader - - cell = create_grade_cell("", "code", "foo", "", 0) - cell.metadata.nbgrader["checksum"] = "abcd" - MetadataValidatorV2().upgrade_cell_metadata(cell) - assert cell.metadata.nbgrader['cell_type'] == "code" - - cell = create_grade_cell("", "code", "foo", "", 0) - cell.metadata.nbgrader["checksum"] = "abcd" - cell.metadata.nbgrader["cell_type"] = "markdown" - MetadataValidatorV2().upgrade_cell_metadata(cell) - assert cell.metadata.nbgrader['cell_type'] == "markdown" - - cell = create_grade_cell("", "code", "foo", "", 0) - cell.metadata.nbgrader["checksum"] = "abcd" - cell.metadata.nbgrader["cell_type"] = "code" - MetadataValidatorV2().upgrade_cell_metadata(cell) - assert cell.metadata.nbgrader['cell_type'] == "code" - - -def test_read(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test-v2.ipynb") - read_v2(path, current_nbformat) - - -def test_reads(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test-v2.ipynb") - contents = open(path, "r").read() - reads_v2(contents, current_nbformat) - - -def test_write(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test-v2.ipynb") - nb = read_v2(path, current_nbformat) - with tempfile.TemporaryFile(mode="w") as fh: - write_v2(nb, fh) - - -def test_writes(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test-v2.ipynb") - nb = read_v2(path, current_nbformat) - writes_v2(nb) - - -def test_too_old(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test-v0.ipynb") - with pytest.raises(SchemaMismatchError): - read_v2(path, current_nbformat) - - -def test_too_new(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test.ipynb") - with pytest.raises(SchemaMismatchError): - read_v2(path, current_nbformat) - - -def test_upgrade_notebook_metadata(): - currdir = os.path.split(__file__)[0] - path = os.path.join(currdir, "..", "apps", "files", "test-v0.ipynb") - with open(path, "r") as fh: - nb = read(fh, current_nbformat) - nb = MetadataValidatorV2().upgrade_notebook_metadata(nb) - - -def test_upgrade_cell_metadata(): - cell = create_grade_cell("", "code", "foo", 5, 0) - MetadataValidatorV2().upgrade_cell_metadata(cell) - - cell = create_grade_cell("", "code", "foo", 5, 2) - MetadataValidatorV2().upgrade_cell_metadata(cell) - - cell = create_grade_cell("", "code", "foo", 5, 3) - MetadataValidatorV2().upgrade_cell_metadata(cell) - - -def test_regular_cells(): - validator = MetadataValidatorV2() - - # code cell without nbgrader metadata - cell = create_code_cell() - validator.validate_cell(cell) - validator.upgrade_cell_metadata(cell) - - # code cell with metadata, but not an nbgrader cell - cell = create_regular_cell("", "code", schema_version=2) - del cell.metadata.nbgrader["task"] - 
-    validator.validate_cell(cell)
-
-    nb = new_notebook()
-    cell1 = create_code_cell()
-    cell2 = create_regular_cell("", "code", schema_version=2)
-    del cell2.metadata.nbgrader["task"]
-    nb.cells = [cell1, cell2]
-    validator.validate_nb(nb)
-
-
-def test_invalid_metadata():
-    validator = MetadataValidatorV2()
-
-    # make sure the default cell works ok
-    cell = create_grade_cell("", "code", "foo", 5, 2)
-    del cell.metadata.nbgrader["task"]
-    validator.validate_cell(cell)
-
-    # missing grade_id
-    cell = create_grade_cell("", "code", "foo", 5, 2)
-    del cell.metadata.nbgrader["task"]
-    del cell.metadata.nbgrader["grade_id"]
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-    # grade_id is empty
-    cell = create_grade_cell("", "code", "", 5, 2)
-    del cell.metadata.nbgrader["task"]
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-    # missing points
-    cell = create_grade_cell("", "code", "foo", 5, 2)
-    del cell.metadata.nbgrader["task"]
-    del cell.metadata.nbgrader["points"]
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-    # markdown grade cell not marked as a solution cell
-    cell = create_grade_cell("", "markdown", "foo", 5, 2)
-    del cell.metadata.nbgrader["task"]
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-    # markdown solution cell not marked as a grade cell
-    cell = create_solution_cell("", "markdown", "foo", 2)
-    del cell.metadata.nbgrader["task"]
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-
-def test_duplicate_cells():
-    validator = MetadataValidatorV2()
-    nb = new_notebook()
-    cell1 = create_grade_cell("", "code", "foo", 5, 2)
-    del cell1.metadata.nbgrader["task"]
-    cell2 = create_grade_cell("", "code", "foo", 5, 2)
-    del cell2.metadata.nbgrader["task"]
-    nb.cells = [cell1, cell2]
-    with pytest.raises(ValidationError):
-        validator.validate_nb(nb)
-
-
-def test_celltype_changed(caplog):
-    cell = create_solution_cell("", "code", "foo", 2)
-    del cell.metadata.nbgrader["task"]
-    cell.metadata.nbgrader["cell_type"] = "code"
-    MetadataValidatorV2().validate_cell(cell)
-    assert "Cell type has changed from markdown to code!" not in caplog.text
-
-    cell = create_solution_cell("", "code", "foo", 2)
-    del cell.metadata.nbgrader["task"]
-    cell.metadata.nbgrader["cell_type"] = "markdown"
-    MetadataValidatorV2().validate_cell(cell)
-    assert "Cell type has changed from markdown to code!" in caplog.text
diff --git a/nbgrader/tests/nbgraderformat/test_v3.py b/nbgrader/tests/nbgraderformat/test_v3.py
deleted file mode 100644
index 49062f132..000000000
--- a/nbgrader/tests/nbgraderformat/test_v3.py
+++ /dev/null
@@ -1,281 +0,0 @@
-import json
-import os
-import pytest
-import tempfile
-from nbformat import current_nbformat, read
-from nbformat.v4 import new_notebook
-from ...nbgraderformat.common import SchemaMismatchError, ValidationError
-from ...nbgraderformat.v3 import (
-    MetadataValidatorV3, read_v3, reads_v3, write_v3, writes_v3)
-from .. import (
-    create_code_cell,
-    create_grade_cell,
-    create_solution_cell,
-    create_task_cell,
-    create_regular_cell)
-
-
-def test_set_false():
-    cell = create_grade_cell("", "code", "foo", 2, 0)
-    del cell.metadata.nbgrader["solution"]
-    del cell.metadata.nbgrader["locked"]
-
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert not cell.metadata.nbgrader["solution"]
-    assert not cell.metadata.nbgrader["locked"]
-
-    cell = create_solution_cell("", "code", "foo", 0)
-    del cell.metadata.nbgrader["grade"]
-    del cell.metadata.nbgrader["locked"]
-
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert not cell.metadata.nbgrader["grade"]
-    assert not cell.metadata.nbgrader["locked"]
-
-
-def test_remove_metadata():
-    cell = create_solution_cell("", "code", "foo", 0)
-    cell.metadata.nbgrader["solution"] = False
-
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert "nbgrader" not in cell.metadata
-
-
-def test_remove_points():
-    cell = create_solution_cell("", "code", "foo", 0)
-    cell.metadata.nbgrader["points"] = 2
-
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert "points" not in cell.metadata.nbgrader
-
-
-def test_set_points():
-    cell = create_grade_cell("", "code", "foo", "", 0)
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader["points"] == 0.0
-
-    cell = create_grade_cell("", "code", "foo", "1.5", 0)
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader["points"] == 1.5
-
-    cell = create_grade_cell("", "code", "foo", 1, 0)
-    del cell.metadata.nbgrader["points"]
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader["points"] == 0.0
-
-    cell = create_grade_cell("", "code", "foo", -1, 0)
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader["points"] == 0.0
-
-
-def test_extra_keys():
-    cell = create_grade_cell("", "code", "foo", "", 0)
-    cell.metadata.nbgrader["foo"] = "bar"
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert "foo" not in cell.metadata.nbgrader
-
-    cell = create_grade_cell("", "code", "foo", "", 1)
-    cell.metadata.nbgrader["foo"] = "bar"
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert "foo" not in cell.metadata.nbgrader
-
-    cell = create_grade_cell("", "code", "foo", "", 2)
-    cell.metadata.nbgrader["foo"] = "bar"
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert "foo" not in cell.metadata.nbgrader
-
-
-def test_schema_version():
-    cell = create_grade_cell("", "code", "foo", "", 0)
-    del cell.metadata.nbgrader["schema_version"]
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader["schema_version"] == 3
-
-
-def test_cell_type():
-    cell = create_grade_cell("", "code", "foo", "", 0)
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert "cell_type" not in cell.metadata.nbgrader
-
-    cell = create_grade_cell("", "code", "foo", "", 0)
-    cell.metadata.nbgrader["checksum"] = "abcd"
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader['cell_type'] == "code"
-
-    cell = create_grade_cell("", "code", "foo", "", 0)
-    cell.metadata.nbgrader["checksum"] = "abcd"
-    cell.metadata.nbgrader["cell_type"] = "markdown"
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader['cell_type'] == "markdown"
-
-    cell = create_grade_cell("", "code", "foo", "", 0)
-    cell.metadata.nbgrader["checksum"] = "abcd"
-    cell.metadata.nbgrader["cell_type"] = "code"
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-    assert cell.metadata.nbgrader['cell_type'] == "code"
-
-
-def test_task_value():
-    cell = create_task_cell("this is a task cell", "markdown", "foo", 0)
-    assert cell.metadata.nbgrader['task']
-
-    cell = create_grade_cell("", "code", "foo", "")
-    assert not cell.metadata.nbgrader['task']
-
-    cell = create_grade_cell("", "code", "foo", "")
-    assert not cell.metadata.nbgrader['task']
-
-    cell = create_solution_cell("", "code", "foo")
-    assert not cell.metadata.nbgrader['task']
-
-
-def test_read():
-    currdir = os.path.split(__file__)[0]
-    path = os.path.join(currdir, "..", "apps", "files", "test.ipynb")
-    read_v3(path, current_nbformat)
-
-
-def test_reads():
-    currdir = os.path.split(__file__)[0]
-    path = os.path.join(currdir, "..", "apps", "files", "test.ipynb")
-    contents = open(path, "r").read()
-    reads_v3(contents, current_nbformat)
-
-
-def test_write():
-    currdir = os.path.split(__file__)[0]
-    path = os.path.join(currdir, "..", "apps", "files", "test.ipynb")
-    nb = read_v3(path, current_nbformat)
-    with tempfile.TemporaryFile(mode="w") as fh:
-        write_v3(nb, fh)
-
-
-def test_writes():
-    currdir = os.path.split(__file__)[0]
-    path = os.path.join(currdir, "..", "apps", "files", "test.ipynb")
-    nb = read_v3(path, current_nbformat)
-    writes_v3(nb)
-
-
-def test_too_old():
-    currdir = os.path.split(__file__)[0]
-    path = os.path.join(currdir, "..", "apps", "files", "test-v0.ipynb")
-    with pytest.raises(SchemaMismatchError):
-        read_v3(path, current_nbformat)
-
-
-def test_too_new():
-    currdir = os.path.split(__file__)[0]
-    path = os.path.join(currdir, "..", "apps", "files", "test.ipynb")
-    nb = read_v3(path, current_nbformat)
-    for cell in nb.cells:
-        if hasattr(cell.metadata, "nbgrader"):
-            cell.metadata.nbgrader.schema_version += 1
-    nb = json.dumps(nb)
-    with pytest.raises(SchemaMismatchError):
-        reads_v3(nb, current_nbformat)
-
-
-def test_upgrade_notebook_metadata():
-    currdir = os.path.split(__file__)[0]
-    path = os.path.join(currdir, "..", "apps", "files", "test-v0.ipynb")
-    with open(path, "r") as fh:
-        nb = read(fh, current_nbformat)
-    nb = MetadataValidatorV3().upgrade_notebook_metadata(nb)
-
-
-def test_upgrade_cell_metadata():
-    cell = create_grade_cell("", "code", "foo", 5, 0)
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-
-    cell = create_grade_cell("", "code", "foo", 5, 3)
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-
-    cell = create_grade_cell("", "code", "foo", 5, 4)
-    MetadataValidatorV3().upgrade_cell_metadata(cell)
-
-
-def test_regular_cells():
-    validator = MetadataValidatorV3()
-
-    # code cell without nbgrader metadata
-    cell = create_code_cell()
-    validator.validate_cell(cell)
-    validator.upgrade_cell_metadata(cell)
-
-    # code cell with metadata, but not an nbgrader cell
-    cell = create_regular_cell("", "code", schema_version=3)
-    validator.validate_cell(cell)
-
-    nb = new_notebook()
-    cell1 = create_code_cell()
-    cell2 = create_regular_cell("", "code", schema_version=3)
-    nb.cells = [cell1, cell2]
-    validator.validate_nb(nb)
-
-
-def test_invalid_metadata():
-    validator = MetadataValidatorV3()
-
-    # make sure the default cell works ok
-    cell = create_grade_cell("", "code", "foo", 5, 3)
-    validator.validate_cell(cell)
-
-    # missing grade_id
-    cell = create_grade_cell("", "code", "foo", 5, 3)
-    del cell.metadata.nbgrader["grade_id"]
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-    # grade_id is empty
-    cell = create_grade_cell("", "code", "", 5, 3)
-    del cell.metadata.nbgrader["task"]
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-    # missing points
-    cell = create_grade_cell("", "code", "foo", 5, 3)
-    del cell.metadata.nbgrader["points"]
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-    # markdown grade cell not marked as a solution cell
-    cell = create_grade_cell("", "markdown", "foo", 5, 3)
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-    # markdown solution cell not marked as a grade cell
-    cell = create_solution_cell("", "markdown", "foo", 3)
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-    # task cell shouldn't be a code cell
-    cell = create_task_cell("", "markdown", "foo", 5, 3)
-    validator.validate_cell(cell)
-
-    # task cell shouldn't be able to be a code cell
-    cell = create_task_cell("", "code", "foo", 5, 3)
-    with pytest.raises(ValidationError):
-        validator.validate_cell(cell)
-
-
-def test_duplicate_cells():
-    validator = MetadataValidatorV3()
-    nb = new_notebook()
-    cell1 = create_grade_cell("", "code", "foo", 5, 3)
-    cell2 = create_grade_cell("", "code", "foo", 5, 3)
-    nb.cells = [cell1, cell2]
-    with pytest.raises(ValidationError):
-        validator.validate_nb(nb)
-
-
-def test_celltype_changed(caplog):
-    cell = create_solution_cell("", "code", "foo", 3)
-    cell.metadata.nbgrader["cell_type"] = "code"
-    MetadataValidatorV3().validate_cell(cell)
-    assert "Cell type has changed from markdown to code!" not in caplog.text
-
-    cell = create_solution_cell("", "code", "foo", 3)
-    cell.metadata.nbgrader["cell_type"] = "markdown"
-    MetadataValidatorV3().validate_cell(cell)
-    assert "Cell type has changed from markdown to code!" in caplog.text
diff --git a/nbgrader/tests/preprocessors/test_checkcellmetadata.py b/nbgrader/tests/preprocessors/test_checkcellmetadata.py
deleted file mode 100644
index bd3cfd385..000000000
--- a/nbgrader/tests/preprocessors/test_checkcellmetadata.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import pytest
-import os
-
-from ...preprocessors import CheckCellMetadata
-from .base import BaseTestPreprocessor
-from .. import create_grade_cell, create_solution_cell
-from nbformat.v4 import new_notebook
-from ...nbgraderformat import ValidationError
-
-@pytest.fixture
-def preprocessor():
-    return CheckCellMetadata()
-
-
-class TestCheckCellMetadata(BaseTestPreprocessor):
-
-    def test_duplicate_grade_ids(self, preprocessor):
-        """Check that an error is raised when there are duplicate grade ids"""
-        nb = self._read_nb(os.path.join("files", "duplicate-grade-ids.ipynb"), validate=False)
-        with pytest.raises(ValidationError):
-            preprocessor.preprocess(nb, {})
-
-    def test_blank_grade_id(self, preprocessor):
-        """Check that an error is raised when the grade id is blank"""
-        nb = self._read_nb(os.path.join("files", "blank-grade-id.ipynb"), validate=False)
-        with pytest.raises(ValidationError):
-            preprocessor.preprocess(nb, {})
-
-    def test_invalid_grade_cell_id(self, preprocessor):
-        """Check that an error is raised when the grade cell id is invalid"""
-        resources = dict(grade_ids=[])
-        nb = new_notebook()
-
-        nb.cells = [create_grade_cell("", "code", "", 1)]
-        with pytest.raises(ValidationError):
-            preprocessor.preprocess(nb, resources)
-
-        nb.cells = [create_grade_cell("", "code", "a b", 1)]
-        with pytest.raises(ValidationError):
-            preprocessor.preprocess(nb, resources)
-
-        nb.cells = [create_grade_cell("", "code", "a\"b", 1)]
-        with pytest.raises(ValidationError):
-            preprocessor.preprocess(nb, resources)
-
-        nb.cells = [create_solution_cell("", "code", "abc-ABC_0")]
-        preprocessor.preprocess(nb, resources)
-
-    def test_invalid_solution_cell_id(self, preprocessor):
-        """Check that an error is raised when the solution id is invalid"""
-        resources = dict(grade_ids=[])
-        nb = new_notebook()
-
-        nb.cells = [create_solution_cell("", "code", "")]
-        with pytest.raises(ValidationError):
-            preprocessor.preprocess(nb, resources)
-
-        nb.cells = [create_solution_cell("", "code", "a b")]
-        with pytest.raises(ValidationError):
-            preprocessor.preprocess(nb, resources)
-
-        nb.cells = [create_solution_cell("", "code", "a\"b")]
-        with pytest.raises(ValidationError):
-            preprocessor.preprocess(nb, resources)
-
-        nb.cells = [create_solution_cell("", "code", "abc-ABC_0")]
-        preprocessor.preprocess(nb, resources)
-
-    def test_blank_points(self, preprocessor):
-        """Check that an error is raised if the points are blank"""
-        nb = self._read_nb(os.path.join("files", "blank-points.ipynb"), validate=False)
-        with pytest.raises(ValidationError):
-            preprocessor.preprocess(nb, {})
-
-    def test_no_duplicate_grade_ids(self, preprocessor):
-        """Check that no errors are raised when grade ids are unique and not blank"""
-        nb = self._read_nb(os.path.join("files", "test.ipynb"), validate=False)
-        preprocessor.preprocess(nb, {})
-
-    def test_code_cell_solution_grade(self, preprocessor):
-        """Check that an error is not raised when a code cell is marked as both solution and grade"""
-        nb = self._read_nb(os.path.join("files", "manually-graded-code-cell.ipynb"), validate=False)
-        preprocessor.preprocess(nb, {})
-
-    def test_markdown_cell_grade(self, preprocessor):
-        """Check that an error is raised when a markdown cell is only marked as grade"""
-        nb = self._read_nb(os.path.join("files", "bad-markdown-cell-1.ipynb"), validate=False)
-        with pytest.raises(ValidationError):
-            preprocessor.preprocess(nb, {})
-
-    def test_markdown_cell_solution(self, preprocessor):
-        """Check that an error is raised when a markdown cell is only marked as solution"""
-        nb = self._read_nb(os.path.join("files", "bad-markdown-cell-2.ipynb"), validate=False)
-        with pytest.raises(ValidationError):
-            preprocessor.preprocess(nb, {})
-
-    def test_cell_type_changed(self, preprocessor):
-        nb = self._read_nb(os.path.join("files", "cell-type-changed.ipynb"), validate=False)
-        preprocessor.preprocess(nb, {})
-
-    def test_no_cell_type(self, preprocessor):
-        nb = self._read_nb(os.path.join("files", "no-cell-type.ipynb"), validate=False)
-        preprocessor.preprocess(nb, {})
diff --git a/nbgrader/tests/preprocessors/test_clearhiddentests.py b/nbgrader/tests/preprocessors/test_clearhiddentests.py
deleted file mode 100644
index 7185be344..000000000
--- a/nbgrader/tests/preprocessors/test_clearhiddentests.py
+++ /dev/null
@@ -1,207 +0,0 @@
-import os
-import pytest
-
-from textwrap import dedent
-from traitlets.config import Config
-
-from .base import BaseTestPreprocessor
-from .. import create_code_cell, create_text_cell
-from ...preprocessors import ClearHiddenTests
-
-
-@pytest.fixture
-def preprocessor():
-    return ClearHiddenTests()
-
-
-class TestClearHiddenTests(BaseTestPreprocessor):
-
-    def test_remove_hidden_test_region_code(self, preprocessor):
-        """Are hidden test regions in code cells correctly replaced?"""
-        cell = create_code_cell()
-        cell.source = dedent(
-            """
-            assert True
-            ### BEGIN HIDDEN TESTS
-            assert True
-            ### END HIDDEN TESTS
-            """
-        ).strip()
-        removed_test = preprocessor._remove_hidden_test_region(cell)
-        assert removed_test
-        assert cell.source == "assert True"
-
-    def test_remove_hidden_test_region_text(self, preprocessor):
-        """Are solution regions in text cells correctly replaced?"""
-        cell = create_text_cell()
-        cell.source = dedent(
-            """
-            something something
-            ### BEGIN HIDDEN TESTS
-            this is a test!
-            ### END HIDDEN TESTS
-            """
-        ).strip()
-        removed_test = preprocessor._remove_hidden_test_region(cell)
-        assert removed_test
-        assert cell.source == "something something"
-
-    def test_remove_hidded_test_region_no_end(self, preprocessor):
-        """Is an error thrown when there is no end hidden test statement?"""
-        cell = create_text_cell()
-        cell.source = dedent(
-            """
-            something something
-            ### BEGIN HIDDEN TESTS
-            this is a test!
-            """
-        ).strip()
-
-        with pytest.raises(RuntimeError):
-            preprocessor._remove_hidden_test_region(cell)
-
-    def test_remove_hidden_test_region_nested_solution(self, preprocessor):
-        """Is an error thrown when there are nested hidden test statements?"""
-        cell = create_text_cell()
-        cell.source = dedent(
-            """
-            something something
-            ### BEGIN HIDDEN TESTS
-            ### BEGIN HIDDEN TESTS
-            this is a test!
- """ - ).strip() - - with pytest.raises(RuntimeError): - preprocessor._remove_hidden_test_region(cell) - - def test_preprocess_code_cell_hidden_test_region(self, preprocessor): - """Is an error thrown when there is a hidden test region but it's not a grade cell?""" - cell = create_code_cell() - cell.source = dedent( - """ - assert True - ### BEGIN HIDDEN TESTS - assert True - ### END HIDDEN TESTS - """ - ).strip() - resources = dict() - with pytest.raises(RuntimeError): - preprocessor.preprocess_cell(cell, resources, 1) - - def test_preprocess_code_grade_cell_hidden_test_region(self, preprocessor): - """Is a code grade cell correctly cleared when there is a hidden test region?""" - cell = create_code_cell() - cell.source = dedent( - """ - assert True - ### BEGIN HIDDEN TESTS - assert True - ### END HIDDEN TESTS - """ - ).strip() - cell.metadata['nbgrader'] = dict(grade=True) - resources = dict() - cell = preprocessor.preprocess_cell(cell, resources, 1)[0] - - assert cell.source == "assert True" - assert cell.metadata.nbgrader['grade'] - - def test_preprocess_text_grade_cell_hidden_test_region(self, preprocessor): - """Is a text grade cell correctly cleared when there is a hidden test region?""" - cell = create_text_cell() - cell.source = dedent( - """ - assert True - ### BEGIN HIDDEN TESTS - assert True - ### END HIDDEN TESTS - """ - ).strip() - cell.metadata['nbgrader'] = dict(grade=True) - - resources = dict() - cell = preprocessor.preprocess_cell(cell, resources, 1)[0] - assert cell.source == "assert True" - assert cell.metadata.nbgrader['grade'] - - def test_preprocess_text_grade_cell_region_indented(self, preprocessor): - """Is a text grade cell correctly cleared and indented when there is a hidden test region?""" - cell = create_text_cell() - cell.source = dedent( - """ - assert True - ### BEGIN HIDDEN TESTS - assert True - ### END HIDDEN TESTS - """ - ).strip() - cell.metadata['nbgrader'] = dict(grade=True) - resources = dict() - - cell = preprocessor.preprocess_cell(cell, resources, 1)[0] - assert cell.source == "assert True" - assert cell.metadata.nbgrader['grade'] - - def test_preprocess_text_cell_metadata(self, preprocessor): - """Is an error thrown when a hidden test region exists in a non-grade text cell?""" - cell = create_text_cell() - cell.source = dedent( - """ - assert True - ### BEGIN HIDDEN TESTS - assert True - ### END HIDDEN TESTS - """ - ).strip() - - resources = dict() - with pytest.raises(RuntimeError): - preprocessor.preprocess_cell(cell, resources, 1) - - # now disable enforcing metadata - preprocessor.enforce_metadata = False - cell, _ = preprocessor.preprocess_cell(cell, resources, 1) - assert cell.source == "assert True" - assert 'nbgrader' not in cell.metadata - - def test_dont_remove_hidden_test_region(self, preprocessor): - """Is false returned when there is no hidden test region?""" - cell = create_text_cell() - removed_test = preprocessor._remove_hidden_test_region(cell) - assert not removed_test - - def test_preprocess_code_cell_no_region(self, preprocessor): - """Is a code cell not cleared when there is no hidden test region?""" - cell = create_code_cell() - cell.source = """assert True""" - cell.metadata['nbgrader'] = dict() - - resources = dict() - cell = preprocessor.preprocess_cell(cell, resources, 1)[0] - assert cell.source == """assert True""" - assert not cell.metadata.nbgrader.get('grade', False) - - def test_preprocess_text_cell_no_region(self, preprocessor): - """Is a text grade cell not cleared when there is no hidden test region?""" - cell 
-        cell.source = """assert True"""
-        cell.metadata['nbgrader'] = dict()
-
-        resources = dict()
-        cell = preprocessor.preprocess_cell(cell, resources, 1)[0]
-        assert cell.source == "assert True"
-        assert not cell.metadata.nbgrader.get('grade', False)
-
-    def test_preprocess_notebook(self, preprocessor):
-        """Is the test notebook processed without error?"""
-        nb = self._read_nb(os.path.join("files", "test.ipynb"))
-        preprocessor.preprocess(nb, {})
-
-    def test_remove_celltoolbar(self, preprocessor):
-        """Is the celltoolbar removed?"""
-        nb = self._read_nb(os.path.join("files", "test.ipynb"))
-        nb.metadata['celltoolbar'] = 'Create Assignment'
-        nb = preprocessor.preprocess(nb, {})[0]
-        assert 'celltoolbar' not in nb.metadata
diff --git a/nbgrader/tests/preprocessors/test_clearmarkscheme.py b/nbgrader/tests/preprocessors/test_clearmarkscheme.py
deleted file mode 100644
index 20c32042c..000000000
--- a/nbgrader/tests/preprocessors/test_clearmarkscheme.py
+++ /dev/null
@@ -1,184 +0,0 @@
-import os
-import pytest
-
-from textwrap import dedent
-from traitlets.config import Config
-
-from .base import BaseTestPreprocessor
-from .. import create_code_cell, create_text_cell, create_task_cell
-from ...preprocessors import ClearMarkScheme
-
-
-@pytest.fixture
-def preprocessor():
-    return ClearMarkScheme()
-
-
-class TestClearMarkScheme(BaseTestPreprocessor):
-
-    def test_remove_mark_scheme_region_code(self, preprocessor):
-        """Are mark scheme regions in code cells correctly replaced?"""
-        source = dedent(
-            """
-            assert True
-            ### BEGIN MARK SCHEME
-            assert True
-            ### END MARK SCHEME
-            """
-        ).strip()
-        cell = create_task_cell(source, 'markdown', 'some-task-id', 2)
-        removed_test = preprocessor._remove_mark_scheme_region(cell)
-        assert removed_test
-        assert cell.source == "assert True"
-
-    def test_remove_mark_scheme_region_no_end(self, preprocessor):
-        """Is an error thrown when there is no end hidden test statement?"""
-        source = dedent(
-            """
-            something something
-            ### BEGIN MARK SCHEME
-            this is a test!
-            """
-        ).strip()
-        cell = create_task_cell(source, 'markdown', 'some-task-id', 2)
-
-        with pytest.raises(RuntimeError):
-            preprocessor._remove_mark_scheme_region(cell)
-
-    def test_remove_mark_scheme_region_nested_solution(self, preprocessor):
-        """Is an error thrown when there are nested hidden test statements?"""
-        source = dedent(
-            """
-            something something
-            ### BEGIN MARK SCHEME
-            ### BEGIN MARK SCHEME
-            this is a test!
- """ - ).strip() - cell = create_task_cell(source, 'markdown', 'some-task-id', 2) - - with pytest.raises(RuntimeError): - preprocessor._remove_mark_scheme_region(cell) - - def test_preprocess_code_cell_mark_scheme_region(self, preprocessor): - """Is an error thrown when there is a mark region but it's not a task cell?""" - cell = create_code_cell() - cell.source = dedent( - """ - assert True - ### BEGIN MARK SCHEME - assert True - ### END MARK SCHEME - """ - ).strip() - resources = dict() - with pytest.raises(RuntimeError): - preprocessor.preprocess_cell(cell, resources, 1) - - def test_preprocess_text_cell_metadata(self, preprocessor): - """Is an error thrown when a mark scheme region exists in a non-task text cell?""" - cell = create_text_cell() - cell.source = dedent( - """ - assert True - ### BEGIN MARK SCHEME - assert True - ### END MARK SCHEME - """ - ).strip() - - resources = dict() - with pytest.raises(RuntimeError): - preprocessor.preprocess_cell(cell, resources, 1) - - # now disable enforcing metadata - preprocessor.enforce_metadata = False - cell, _ = preprocessor.preprocess_cell(cell, resources, 1) - assert cell.source == "assert True" - assert 'nbgrader' not in cell.metadata - - def test_dont_remove_mark_scheme_region(self, preprocessor): - """Is false returned when there is no hidden test region?""" - source = "nothing to remove" - cell = create_task_cell(source, 'markdown', 'some-task-id', 2) - removed_test = preprocessor._remove_mark_scheme_region(cell) - assert not removed_test - - def test_preprocess_notebook(self, preprocessor): - """Is the test notebook processed without error?""" - nb = self._read_nb(os.path.join("files", "test_taskcell.ipynb")) - preprocessor.preprocess(nb, {}) - - # attachment detection tests - def test_attachment_in_mark_scheme(self, preprocessor): - """Is an error raised when there is an attachment in the marking scheme?""" - source = dedent( - """ - assert True - ### BEGIN MARK SCHEME - ![](attachment:image.png) - ### END MARK SCHEME - """ - ).strip() - cell = create_task_cell(source, 'markdown', 'some-task-id', 2) - with pytest.raises(RuntimeError): - preprocessor._remove_mark_scheme_region(cell) - - source = dedent( - """ - assert True - ### BEGIN MARK SCHEME - ![alt text](attachment:answers.png) - ### END MARK SCHEME - """ - ).strip() - cell = create_task_cell(source, 'markdown', 'some-task-id', 2) - with pytest.raises(RuntimeError): - preprocessor._remove_mark_scheme_region(cell) - - source = dedent( - """ - assert True - ### BEGIN MARK SCHEME - Text text text text text. - ![](attachment:image1.jpg) - Grade grade grade. - ![](attachment:image2.png) - Mark mark mark mark. 
-            ### END MARK SCHEME
-            """
-        ).strip()
-        cell = create_task_cell(source, 'markdown', 'some-task-id', 2)
-        with pytest.raises(RuntimeError):
-            preprocessor._remove_mark_scheme_region(cell)
-
-    def test_attachment_suppress_error(self, preprocessor):
-        """Can the error be suppressed?"""
-        source = dedent(
-            """
-            assert True
-            ### BEGIN MARK SCHEME
-            ![](attachment:image.png)
-            ### END MARK SCHEME
-            """
-        ).strip()
-        cell = create_task_cell(source, 'markdown', 'some-task-id', 2)
-        preprocessor.check_attachment_leakage = False
-        removed_test = preprocessor._remove_mark_scheme_region(cell)
-        assert removed_test
-        assert cell.source == "assert True"
-
-    def test_attachment_not_in_mark_scheme(self, preprocessor):
-        """Attachments outside of marking schemes shouldn't be touched"""
-        source = dedent(
-            """
-            ![](attachment:image.png)
-            ### BEGIN MARK SCHEME
-            assert True
-            ### END MARK SCHEME
-            """
-        ).strip()
-        cell = create_task_cell(source, 'markdown', 'some-task-id', 2)
-        removed_test = preprocessor._remove_mark_scheme_region(cell)
-        assert removed_test
-        assert cell.source == "![](attachment:image.png)"
diff --git a/nbgrader/tests/preprocessors/test_clearsolutions.py b/nbgrader/tests/preprocessors/test_clearsolutions.py
deleted file mode 100644
index 8fa74bda4..000000000
--- a/nbgrader/tests/preprocessors/test_clearsolutions.py
+++ /dev/null
@@ -1,238 +0,0 @@
-import pytest
-import os
-
-from textwrap import dedent
-from traitlets.config import Config
-from ...preprocessors import ClearSolutions
-from .base import BaseTestPreprocessor
-from .. import create_code_cell, create_text_cell
-
-
-@pytest.fixture
-def preprocessor():
-    return ClearSolutions()
-
-
-class TestClearSolutions(BaseTestPreprocessor):
-
-    def test_replace_solution_region_code(self, preprocessor):
-        """Are solution regions in code cells correctly replaced?"""
-        cell = create_code_cell()
-        replaced_solution = preprocessor._replace_solution_region(cell, "python")
-        assert replaced_solution
-        assert cell.source == dedent(
-            """
-            print("something")
-            # YOUR CODE HERE
-            raise NotImplementedError()
-            """
-        ).strip()
-
-    def test_replace_solution_region_text(self, preprocessor):
-        """Are solution regions in text cells correctly replaced?"""
-        cell = create_text_cell()
-        cell.source = dedent(
-            """
-            something something
-            ### BEGIN SOLUTION
-            this is the answer!
-            ### END SOLUTION
-            """
-        ).strip()
-        replaced_solution = preprocessor._replace_solution_region(cell, "python")
-        assert replaced_solution
-        assert cell.source == "something something\nYOUR ANSWER HERE"
-
-    def test_dont_replace_solution_region(self, preprocessor):
-        """Is false returned when there is no solution region?"""
-        cell = create_text_cell()
-        replaced_solution = preprocessor._replace_solution_region(cell, "python")
-        assert not replaced_solution
-
-    def test_replace_solution_region_no_end(self, preprocessor):
-        """Is an error thrown when there is no end solution statement?"""
-        cell = create_text_cell()
-        cell.source = dedent(
-            """
-            something something
-            ### BEGIN SOLUTION
-            this is the answer!
-            """
-        ).strip()
-
-        with pytest.raises(RuntimeError):
-            preprocessor._replace_solution_region(cell, "python")
-
-    def test_replace_solution_region_nested_solution(self, preprocessor):
-        """Is an error thrown when there are nested solution statements?"""
-        cell = create_text_cell()
-        cell.source = dedent(
-            """
-            something something
-            ### BEGIN SOLUTION
-            ### BEGIN SOLUTION
-            this is the answer!
-            ### END SOLUTION
-            """
-        ).strip()
-
-        with pytest.raises(RuntimeError):
-            preprocessor._replace_solution_region(cell, "python")
-
-    def test_preprocess_code_solution_cell_solution_region(self, preprocessor):
-        """Is a code solution cell correctly cleared when there is a solution region?"""
-        cell = create_code_cell()
-        cell.metadata['nbgrader'] = dict(solution=True)
-        resources = dict(language="python")
-        cell = preprocessor.preprocess_cell(cell, resources, 1)[0]
-
-        assert cell.source == dedent(
-            """
-            print("something")
-            # YOUR CODE HERE
-            raise NotImplementedError()
-            """
-        ).strip()
-        assert cell.metadata.nbgrader['solution']
-
-    def test_preprocess_code_cell_solution_region(self, preprocessor):
-        """Is an error thrown when there is a solution region but it's not a solution cell?"""
-        cell = create_code_cell()
-        resources = dict(language="python")
-        with pytest.raises(RuntimeError):
-            preprocessor.preprocess_cell(cell, resources, 1)
-
-    def test_preprocess_code_solution_cell_no_region(self, preprocessor):
-        """Is a code solution cell correctly cleared when there is no solution region?"""
-        cell = create_code_cell()
-        cell.source = """print("the answer!")"""
-        cell.metadata['nbgrader'] = dict(solution=True)
-        resources = dict(language="python")
-
-        cell = preprocessor.preprocess_cell(cell, resources, 1)[0]
-        assert cell.source == dedent(
-            """
-            # YOUR CODE HERE
-            raise NotImplementedError()
-            """
-        ).strip()
-        assert cell.metadata.nbgrader['solution']
-
-    def test_preprocess_code_cell_no_region(self, preprocessor):
-        """Is a code cell not cleared when there is no solution region?"""
-        cell = create_code_cell()
-        cell.source = """print("the answer!")"""
-        cell.metadata['nbgrader'] = dict()
-        resources = dict(language="python")
-        cell = preprocessor.preprocess_cell(cell, resources, 1)[0]
-
-        assert cell.source == """print("the answer!")"""
-        assert not cell.metadata.nbgrader.get('solution', False)
-
-    def test_preprocess_text_solution_cell_no_region(self, preprocessor):
-        """Is a text grade cell correctly cleared when there is no solution region?"""
-        cell = create_text_cell()
-        cell.metadata['nbgrader'] = dict(solution=True)
-        resources = dict(language="python")
-        cell = preprocessor.preprocess_cell(cell, resources, 1)[0]
-
-        assert cell.source == "YOUR ANSWER HERE"
-        assert cell.metadata.nbgrader['solution']
-
-    def test_preprocess_text_cell_no_region(self, preprocessor):
-        """Is a text grade cell not cleared when there is no solution region?"""
-        cell = create_text_cell()
-        cell.metadata['nbgrader'] = dict()
-        resources = dict(language="python")
-        cell = preprocessor.preprocess_cell(cell, resources, 1)[0]
-
-        assert cell.source == "this is the answer!\n"
-        assert not cell.metadata.nbgrader.get('solution', False)
-
-    def test_preprocess_text_solution_cell_region(self, preprocessor):
-        """Is a text grade cell correctly cleared when there is a solution region?"""
-        cell = create_text_cell()
-        cell.source = dedent(
-            """
-            something something
-            ### BEGIN SOLUTION
-            this is the answer!
-            ### END SOLUTION
-            """
-        ).strip()
-        cell.metadata['nbgrader'] = dict(solution=True)
-        resources = dict(language="python")
-
-        cell = preprocessor.preprocess_cell(cell, resources, 1)[0]
-        assert cell.source == "something something\nYOUR ANSWER HERE"
-        assert cell.metadata.nbgrader['solution']
-
-    def test_preprocess_text_solution_cell_region_indented(self, preprocessor):
-        """Is a text grade cell correctly cleared and indented when there is a solution region?"""
-        cell = create_text_cell()
-        cell.source = dedent(
-            """
-            something something
-              ### BEGIN SOLUTION
-              this is the answer!
-              ### END SOLUTION
-            """
-        ).strip()
-        cell.metadata['nbgrader'] = dict(solution=True)
-        resources = dict(language="python")
-
-        cell = preprocessor.preprocess_cell(cell, resources, 1)[0]
-        assert cell.source == "something something\n  YOUR ANSWER HERE"
-        assert cell.metadata.nbgrader['solution']
-
-    def test_preprocess_text_cell_metadata(self, preprocessor):
-        """Is an error thrown when a solution region exists in a non-solution text cell?"""
-        cell = create_text_cell()
-        cell.source = dedent(
-            """
-            something something
-            ### BEGIN SOLUTION
-            this is the answer!
-            ### END SOLUTION
-            """
-        ).strip()
-
-        resources = dict(language="python")
-        with pytest.raises(RuntimeError):
-            preprocessor.preprocess_cell(cell, resources, 1)
-
-        # now disable enforcing metadata
-        preprocessor.enforce_metadata = False
-        cell, _ = preprocessor.preprocess_cell(cell, resources, 1)
-        assert cell.source == "something something\nYOUR ANSWER HERE"
-        assert 'nbgrader' not in cell.metadata
-
-    def test_preprocess_notebook(self, preprocessor):
-        """Is the test notebook processed without error?"""
-        nb = self._read_nb(os.path.join("files", "test.ipynb"))
-        preprocessor.preprocess(nb, {})
-
-    def test_remove_celltoolbar(self, preprocessor):
-        """Is the celltoolbar removed?"""
-        nb = self._read_nb(os.path.join("files", "test.ipynb"))
-        nb.metadata['celltoolbar'] = 'Create Assignment'
-        nb = preprocessor.preprocess(nb, {})[0]
-        assert 'celltoolbar' not in nb.metadata
-
-    def test_old_config(self):
-        """Are deprecations handled cleanly?"""
-        c = Config()
-        c.ClearSolutions.code_stub = "foo"
-        pp = ClearSolutions(config=c)
-        assert pp.code_stub == dict(python="foo")
-
-    def test_language_missing(self, preprocessor):
-        nb = self._read_nb(os.path.join("files", "test.ipynb"))
-        nb.metadata['kernelspec'] = {}
-        nb.metadata['kernelspec']['language'] = "javascript"
-
-        with pytest.raises(ValueError):
-            preprocessor.preprocess(nb, {})
-
-        preprocessor.code_stub = dict(javascript="foo")
-        preprocessor.preprocess(nb, {})
diff --git a/nbgrader/tests/preprocessors/test_computechecksums.py b/nbgrader/tests/preprocessors/test_computechecksums.py
deleted file mode 100644
index 59955d2c4..000000000
--- a/nbgrader/tests/preprocessors/test_computechecksums.py
+++ /dev/null
@@ -1,119 +0,0 @@
-import pytest
-
-from ...preprocessors import ComputeChecksums
-from ...utils import compute_checksum
-from .base import BaseTestPreprocessor
-from .. import (
-    create_code_cell, create_text_cell,
-    create_grade_cell, create_solution_cell, create_locked_cell)
-
-
-@pytest.fixture
-def preprocessor():
-    pp = ComputeChecksums()
-    pp.comment_index = 0
-    return pp
-
-
-class TestComputeChecksums(BaseTestPreprocessor):
-
-    def test_code_cell_no_checksum(self, preprocessor):
-        """Test that no checksum is computed for a regular code cell"""
-        cell, _ = preprocessor.preprocess_cell(
-            create_code_cell(), {}, 0)
-        assert "nbgrader" not in cell.metadata or "checksum" not in cell.metadata.nbgrader
-
-    def test_text_cell_no_checksum(self, preprocessor):
-        """Test that no checksum is computed for a regular text cell"""
-        cell, _ = preprocessor.preprocess_cell(
-            create_text_cell(), {}, 0)
-        assert "nbgrader" not in cell.metadata or "checksum" not in cell.metadata.nbgrader
-
-    def test_checksum_grade_cell_type(self, preprocessor):
-        """Test that the checksum is computed for grade cells of different cell types"""
-        cell1 = create_grade_cell("", "code", "foo", 1)
-        cell1 = preprocessor.preprocess_cell(cell1, {}, 0)[0]
-        cell2 = create_grade_cell("", "markdown", "foo", 1)
-        cell2 = preprocessor.preprocess_cell(cell2, {}, 0)[0]
-
-        assert cell1.metadata.nbgrader["checksum"] == compute_checksum(cell1)
-        assert cell2.metadata.nbgrader["checksum"] == compute_checksum(cell2)
-        assert cell1.metadata.nbgrader["checksum"] != cell2.metadata.nbgrader["checksum"]
-
-    def test_checksum_solution_cell_type(self, preprocessor):
-        """Test that the checksum is computed for solution cells of different cell types"""
-        cell1 = create_solution_cell("", "code", "foo")
-        cell1 = preprocessor.preprocess_cell(cell1, {}, 0)[0]
-        cell2 = create_solution_cell("", "markdown", "foo")
-        cell2 = preprocessor.preprocess_cell(cell2, {}, 0)[0]
-
-        assert cell1.metadata.nbgrader["checksum"] == compute_checksum(cell1)
-        assert cell2.metadata.nbgrader["checksum"] == compute_checksum(cell2)
-        assert cell1.metadata.nbgrader["checksum"] != cell2.metadata.nbgrader["checksum"]
-
-    def test_checksum_locked_cell_type(self, preprocessor):
-        """Test that the checksum is computed for locked cells"""
-        cell1 = create_locked_cell("", "code", "foo")
-        cell1 = preprocessor.preprocess_cell(cell1, {}, 0)[0]
-        cell2 = create_locked_cell("", "markdown", "foo")
-        cell2 = preprocessor.preprocess_cell(cell2, {}, 0)[0]
-
-        assert cell1.metadata.nbgrader["checksum"] == compute_checksum(cell1)
-        assert cell2.metadata.nbgrader["checksum"] == compute_checksum(cell2)
-        assert cell1.metadata.nbgrader["checksum"] != cell2.metadata.nbgrader["checksum"]
-
-    def test_checksum_points(self, preprocessor):
-        """Test that the checksum is computed for grade cells with different points"""
-        cell1 = create_grade_cell("", "code", "foo", 1)
-        cell1 = preprocessor.preprocess_cell(cell1, {}, 0)[0]
-        cell2 = create_grade_cell("", "code", "foo", 2)
-        cell2 = preprocessor.preprocess_cell(cell2, {}, 0)[0]
-
-        assert cell1.metadata.nbgrader["checksum"] == compute_checksum(cell1)
-        assert cell2.metadata.nbgrader["checksum"] == compute_checksum(cell2)
-        assert cell1.metadata.nbgrader["checksum"] != cell2.metadata.nbgrader["checksum"]
-
-    def test_checksum_grade_id(self, preprocessor):
-        """Test that the checksum is computed for grade cells with different ids"""
-        cell1 = create_grade_cell("", "code", "foo", 1)
-        cell1 = preprocessor.preprocess_cell(cell1, {}, 0)[0]
-        cell2 = create_grade_cell("", "code", "bar", 1)
-        cell2 = preprocessor.preprocess_cell(cell2, {}, 0)[0]
-
-        assert cell1.metadata.nbgrader["checksum"] == compute_checksum(cell1)
compute_checksum(cell1) - assert cell2.metadata.nbgrader["checksum"] == compute_checksum(cell2) - assert cell1.metadata.nbgrader["checksum"] != cell2.metadata.nbgrader["checksum"] - - def test_checksum_grade_source(self, preprocessor): - """Test that the checksum is computed for grade cells with different sources""" - cell1 = create_grade_cell("a", "code", "foo", 1) - cell1 = preprocessor.preprocess_cell(cell1, {}, 0)[0] - cell2 = create_grade_cell("b", "code", "foo", 1) - cell2 = preprocessor.preprocess_cell(cell2, {}, 0)[0] - - assert cell1.metadata.nbgrader["checksum"] == compute_checksum(cell1) - assert cell2.metadata.nbgrader["checksum"] == compute_checksum(cell2) - assert cell1.metadata.nbgrader["checksum"] != cell2.metadata.nbgrader["checksum"] - - def test_checksum_solution_source(self, preprocessor): - """Test that the checksum is computed for solution cells with different sources""" - cell1 = create_solution_cell("a", "code", "foo") - cell1 = preprocessor.preprocess_cell(cell1, {}, 0)[0] - cell2 = create_solution_cell("b", "code", "foo") - cell2 = preprocessor.preprocess_cell(cell2, {}, 0)[0] - - assert cell1.metadata.nbgrader["checksum"] == compute_checksum(cell1) - assert cell2.metadata.nbgrader["checksum"] == compute_checksum(cell2) - assert cell1.metadata.nbgrader["checksum"] != cell2.metadata.nbgrader["checksum"] - - def test_checksum_grade_and_solution(self, preprocessor): - """Test that a checksum is created for grade cells that are also solution cells""" - cell1 = create_grade_cell("", "markdown", "foo", 1) - cell1 = preprocessor.preprocess_cell(cell1, {}, 0)[0] - cell2 = create_grade_cell("", "markdown", "foo", 1) - cell2.metadata.nbgrader["solution"] = True - cell2 = preprocessor.preprocess_cell(cell2, {}, 0)[0] - - assert cell1.metadata.nbgrader["checksum"] == compute_checksum(cell1) - assert cell2.metadata.nbgrader["checksum"] == compute_checksum(cell2) - assert cell1.metadata.nbgrader["checksum"] != cell2.metadata.nbgrader["checksum"] diff --git a/nbgrader/tests/preprocessors/test_deduplicateids.py b/nbgrader/tests/preprocessors/test_deduplicateids.py deleted file mode 100644 index c0e0642f4..000000000 --- a/nbgrader/tests/preprocessors/test_deduplicateids.py +++ /dev/null @@ -1,53 +0,0 @@ -import pytest - -from nbformat.v4 import new_notebook - -from ...preprocessors import DeduplicateIds -from .base import BaseTestPreprocessor -from .. 
import ( - create_grade_cell, create_solution_cell, create_locked_cell) - - -@pytest.fixture -def preprocessor(): - pp = DeduplicateIds() - return pp - - -class TestDeduplicateIds(BaseTestPreprocessor): - - def test_duplicate_grade_cell(self, preprocessor): - cell1 = create_grade_cell("hello", "code", "foo", 2) - cell2 = create_grade_cell("goodbye", "code", "foo", 2) - nb = new_notebook() - nb.cells.append(cell1) - nb.cells.append(cell2) - - nb, resources = preprocessor.preprocess(nb, {}) - - assert nb.cells[0].metadata.nbgrader == {} - assert nb.cells[1].metadata.nbgrader != {} - - def test_duplicate_solution_cell(self, preprocessor): - cell1 = create_solution_cell("hello", "code", "foo") - cell2 = create_solution_cell("goodbye", "code", "foo") - nb = new_notebook() - nb.cells.append(cell1) - nb.cells.append(cell2) - - nb, resources = preprocessor.preprocess(nb, {}) - - assert nb.cells[0].metadata.nbgrader == {} - assert nb.cells[1].metadata.nbgrader != {} - - def test_duplicate_locked_cell(self, preprocessor): - cell1 = create_locked_cell("hello", "code", "foo") - cell2 = create_locked_cell("goodbye", "code", "foo") - nb = new_notebook() - nb.cells.append(cell1) - nb.cells.append(cell2) - - nb, resources = preprocessor.preprocess(nb, {}) - - assert nb.cells[0].metadata.nbgrader == {} - assert nb.cells[1].metadata.nbgrader != {} diff --git a/nbgrader/tests/preprocessors/test_getgrades.py b/nbgrader/tests/preprocessors/test_getgrades.py deleted file mode 100644 index b41178740..000000000 --- a/nbgrader/tests/preprocessors/test_getgrades.py +++ /dev/null @@ -1,133 +0,0 @@ -import pytest - -from nbformat.v4 import new_notebook, new_output - -from ...preprocessors import SaveCells, SaveAutoGrades, GetGrades -from ...api import Gradebook -from ...utils import compute_checksum -from .base import BaseTestPreprocessor -from .. 
import ( - create_grade_cell, create_solution_cell, create_grade_and_solution_cell) - - -@pytest.fixture -def preprocessors(): - return (SaveCells(), SaveAutoGrades(), GetGrades()) - - -@pytest.fixture -def gradebook(request, db): - gb = Gradebook(db) - gb.add_assignment("ps0") - gb.add_student("bar") - - def fin(): - gb.close() - request.addfinalizer(fin) - - return gb - - -@pytest.fixture -def resources(db): - return { - "nbgrader": { - "db_url": db, - "assignment": "ps0", - "notebook": "test", - "student": "bar" - } - } - - -class TestGetGrades(BaseTestPreprocessor): - - def test_save_correct_code(self, preprocessors, gradebook, resources): - """Is a passing code cell correctly graded?""" - cell = create_grade_cell("hello", "code", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - preprocessors[1].preprocess(nb, resources) - preprocessors[2].preprocess(nb, resources) - - assert cell.metadata.nbgrader['score'] == 1 - assert cell.metadata.nbgrader['points'] == 1 - assert 'comment' not in cell.metadata.nbgrader - - def test_save_incorrect_code(self, preprocessors, gradebook, resources): - """Is a failing code cell correctly graded?""" - cell = create_grade_cell("hello", "code", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - cell.outputs = [new_output('error', ename="NotImplementedError", evalue="", traceback=["error"])] - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - preprocessors[1].preprocess(nb, resources) - preprocessors[2].preprocess(nb, resources) - - assert cell.metadata.nbgrader['score'] == 0 - assert cell.metadata.nbgrader['points'] == 1 - assert 'comment' not in cell.metadata.nbgrader - - def test_save_unchanged_code(self, preprocessors, gradebook, resources): - """Is an unchanged code cell given the correct comment?""" - cell = create_solution_cell("hello", "code", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - preprocessors[1].preprocess(nb, resources) - preprocessors[2].preprocess(nb, resources) - - assert cell.metadata.nbgrader['comment'] == "No response." - - def test_save_changed_code(self, preprocessors, gradebook, resources): - """Is an unchanged code cell given the correct comment?""" - cell = create_solution_cell("hello", "code", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - cell.source = "hello!" 
- preprocessors[1].preprocess(nb, resources) - preprocessors[2].preprocess(nb, resources) - - assert cell.metadata.nbgrader['comment'] is None - - def test_save_unchanged_markdown(self, preprocessors, gradebook, resources): - """Is an unchanged markdown cell correctly graded?""" - cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - preprocessors[1].preprocess(nb, resources) - preprocessors[2].preprocess(nb, resources) - - assert cell.metadata.nbgrader['score'] == 0 - assert cell.metadata.nbgrader['points'] == 1 - assert cell.metadata.nbgrader['comment'] == "No response." - - def test_save_changed_markdown(self, preprocessors, gradebook, resources): - """Is a changed markdown cell correctly graded?""" - cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - cell.source = "hello!" - preprocessors[1].preprocess(nb, resources) - preprocessors[2].preprocess(nb, resources) - - assert cell.metadata.nbgrader['score'] == 0 - assert cell.metadata.nbgrader['points'] == 1 - - assert cell.metadata.nbgrader['comment'] is None diff --git a/nbgrader/tests/preprocessors/test_headerfooter.py b/nbgrader/tests/preprocessors/test_headerfooter.py deleted file mode 100644 index 608c6dc4c..000000000 --- a/nbgrader/tests/preprocessors/test_headerfooter.py +++ /dev/null @@ -1,48 +0,0 @@ -import pytest -import os - -from ...preprocessors import IncludeHeaderFooter -from .base import BaseTestPreprocessor - - -@pytest.fixture -def preprocessor(): - return IncludeHeaderFooter() - - -class TestIncludeHeaderFooter(BaseTestPreprocessor): - - def test_concatenate_nothing(self, preprocessor): - """Are the cells the same if there is no header or footer?""" - orig_nb = self._read_nb(os.path.join("files", "test.ipynb")) - nb = preprocessor.preprocess(orig_nb, {})[0] - assert nb == orig_nb - - def test_concatenate_header(self, preprocessor): - """Is the header prepended correctly?""" - preprocessor.header = os.path.join(os.path.dirname(__file__), "files", "header.ipynb") - cells = self._read_nb(os.path.join("files", "header.ipynb")).cells[:] - orig_nb = self._read_nb(os.path.join("files", "test.ipynb")) - orig_cells = orig_nb.cells[:] - nb = preprocessor.preprocess(orig_nb, {})[0] - assert nb.cells == (cells + orig_cells) - - def test_concatenate_footer(self, preprocessor): - """Is the footer appended correctly?""" - preprocessor.footer = os.path.join(os.path.dirname(__file__), "files", "header.ipynb") - cells = self._read_nb(os.path.join("files", "header.ipynb")).cells[:] - orig_nb = self._read_nb(os.path.join("files", "test.ipynb")) - orig_cells = orig_nb.cells[:] - nb = preprocessor.preprocess(orig_nb, {})[0] - assert nb.cells == (orig_cells + cells) - - def test_concatenate_header_and_footer(self, preprocessor): - """Are the header and footer appended correctly?""" - preprocessor.header = os.path.join(os.path.dirname(__file__), "files", "header.ipynb") - preprocessor.footer = os.path.join(os.path.dirname(__file__), "files", "header.ipynb") - header_cells = self._read_nb(os.path.join("files", "header.ipynb")).cells[:] - footer_cells = self._read_nb(os.path.join("files", "header.ipynb")).cells[:] - orig_nb = 
self._read_nb(os.path.join("files", "test.ipynb")) - orig_cells = orig_nb.cells[:] - nb = preprocessor.preprocess(orig_nb, {})[0] - assert nb.cells == (header_cells + orig_cells + footer_cells) diff --git a/nbgrader/tests/preprocessors/test_instantiatetests.py b/nbgrader/tests/preprocessors/test_instantiatetests.py new file mode 100644 index 000000000..8dc78a75a --- /dev/null +++ b/nbgrader/tests/preprocessors/test_instantiatetests.py @@ -0,0 +1,286 @@ +import pytest +import shutil +import os +from textwrap import dedent +from ...preprocessors import InstantiateTests +from .base import BaseTestPreprocessor +from .. import create_code_cell, create_text_cell, create_autotest_solution_cell, create_autotest_test_cell, create_file_loader_cell +from nbformat.v4 import new_notebook +from nbclient.client import NotebookClient + + +@pytest.fixture +def preprocessor(): + return InstantiateTests() + + +class TestInstantiateTests(BaseTestPreprocessor): + + def test_load_test_template_file(self, preprocessor): + resources = { + 'kernel_name': 'python3', + 'metadata': {'path': 'nbgrader/docs/source/user_guide'} + } + preprocessor._load_test_template_file(resources=resources) + assert preprocessor.test_templates_by_type is not None + assert preprocessor.dispatch_template is not None + assert preprocessor.success_code is not None + assert preprocessor.hash_template is not None + assert preprocessor.check_template is not None + assert preprocessor.normalize_template is not None + assert preprocessor.setup_code is not None + + def test_has_sanitizers(self, preprocessor): + assert 'python' in preprocessor.sanitizers.keys() + assert 'python3' in preprocessor.sanitizers.keys() + assert 'ir' in preprocessor.sanitizers.keys() + + def test_has_comment_strs(self, preprocessor): + assert 'python' in preprocessor.comment_strs.keys() + assert 'python3' in preprocessor.comment_strs.keys() + assert 'ir' in preprocessor.comment_strs.keys() + + # test that autotest generates assert statements + def test_replace_autotest_code(self, preprocessor): + sol_cell = create_autotest_solution_cell() + test_cell = create_autotest_test_cell() + test_cell.metadata['nbgrader'] = {'grade': True} + nb = new_notebook() + nb.metadata['kernelspec'] = { + "name": "python3" + } + nb.cells.append(sol_cell) + nb.cells.append(test_cell) + resources = { + 'metadata': {'path': 'nbgrader/docs/source/user_guide/'} + } + nb, resources = preprocessor.preprocess(nb, resources) + assert 'assert' in nb['cells'][1]['source'] + + # test that autotest generates consistent output given the same input + def test_consistent_release_version(self, preprocessor): + sol_cell = create_autotest_solution_cell() + test_cell = create_autotest_test_cell() + test_cell.metadata['nbgrader'] = {'grade': True} + + # create and process first notebook + nb1 = new_notebook() + nb1.metadata['kernelspec'] = { + "name": "python3" + } + nb1.cells.append(sol_cell) + nb1.cells.append(test_cell) + resources1 = { + 'metadata': {'path': 'nbgrader/docs/source/user_guide/'} + } + nb1, resources1 = preprocessor.preprocess(nb1, resources1) + + # create and process second notebook + nb2 = new_notebook() + nb2.metadata['kernelspec'] = { + "name": "python3" + } + nb2.cells.append(sol_cell) + nb2.cells.append(test_cell) + resources2 = { + 'metadata': {'path': 'nbgrader/docs/source/user_guide/'} + } + nb2, resources2 = preprocessor.preprocess(nb2, resources2) + assert nb1['cells'][1]['source'] == nb2['cells'][1]['source'] + + # test that autotest starts a kernel that uses the `path` metadata as 
working directory + # with the right path, the kernel should load the file + def test_kernel_right_workingdir(self, preprocessor, caplog): + sol_cell = create_autotest_solution_cell() + test_cell = create_autotest_test_cell() + load_cell = create_file_loader_cell('grades.csv') + test_cell.metadata['nbgrader'] = {'grade': True} + + nb = new_notebook() + nb.metadata['kernelspec'] = { + "name": "python3" + } + nb.cells.append(sol_cell) + nb.cells.append(test_cell) + nb.cells.append(load_cell) + resources = { + 'metadata': {'path': 'nbgrader/docs/source/user_guide/'} + } + nb, resources = preprocessor.preprocess(nb, resources) + + # test that autotest starts a kernel that uses the `path` metadata as working directory + # without the right path, the kernel should report an error + def test_kernel_wrong_workingdir(self, preprocessor, caplog): + sol_cell = create_autotest_solution_cell() + test_cell = create_autotest_test_cell() + load_cell = create_file_loader_cell('grades.csv') + test_cell.metadata['nbgrader'] = {'grade': True} + + nb = new_notebook() + nb.metadata['kernelspec'] = { + "name": "python3" + } + nb.cells.append(sol_cell) + nb.cells.append(test_cell) + nb.cells.append(load_cell) + resources = { + 'metadata': {'path': 'nbgrader/docs/source/user_guide/source/'} + } + # make sure autotest doesn't fail prior to running the + # preprocessor because it can't find autotests.yml + # we want it to fail because it can't find a resource file + shutil.copyfile('nbgrader/docs/source/user_guide/autotests.yml', 'nbgrader/docs/source/user_guide/source/autotests.yml') + with pytest.raises(Exception): + nb, resources = preprocessor.preprocess(nb, resources) + # remove the temporary resource + os.remove('nbgrader/docs/source/user_guide/source/autotests.yml') + + assert "FileNotFoundError" in caplog.text + + # test that a warning is thrown when we set enforce_metadata = False and have an AUTOTEST directive in a + # non-grade cell + def test_warning_autotest_nongrade(self, preprocessor, caplog): + preprocessor.enforce_metadata = False + sol_cell = create_autotest_solution_cell() + test_cell = create_autotest_test_cell() + test_cell.metadata['nbgrader'] = {'grade': False} + nb = new_notebook() + nb.metadata['kernelspec'] = { + "name": "python3" + } + nb.cells.append(sol_cell) + nb.cells.append(test_cell) + resources = { + 'metadata': {'path': 'nbgrader/docs/source/user_guide/'} + } + + nb, resources = preprocessor.preprocess(nb, resources) + assert "AutoTest region detected in a non-grade cell; " in caplog.text + + # test that an error is thrown when we have an AUTOTEST directive in a non-grade cell + def test_error_autotest_nongrade(self, preprocessor, caplog): + sol_cell = create_autotest_solution_cell() + test_cell = create_autotest_test_cell() + test_cell.metadata['nbgrader'] = {'grade': False} + nb = new_notebook() + nb.metadata['kernelspec'] = { + "name": "python3" + } + nb.cells.append(sol_cell) + nb.cells.append(test_cell) + resources = { + 'metadata': {'path': 'nbgrader/docs/source/user_guide/'} + } + with pytest.raises(Exception): + nb, resources = preprocessor.preprocess(nb, resources) + + assert "AutoTest region detected in a non-grade cell; " in caplog.text + + # test that invalid python statements in AUTOTEST directives cause errors + def test_error_bad_autotest_code(self, preprocessor): + sol_cell = create_autotest_solution_cell() + test_cell = create_autotest_test_cell() + test_cell.source = """ + ### AUTOTEST length(answer) + """ + test_cell.metadata['nbgrader'] = {'grade': True} + nb = 
new_notebook() + nb.metadata['kernelspec'] = { + "name": "python3" + } + nb.cells.append(sol_cell) + nb.cells.append(test_cell) + resources = { + 'metadata': {'path': 'nbgrader/docs/source/user_guide/'} + } + with pytest.raises(Exception): + nb, resources = preprocessor.preprocess(nb, resources) + + # test the code generated for some basic types; ensure correct solution gives success, a few wrong solutions give + # failures + def test_int_autotest(self, preprocessor): + sol_cell = create_autotest_solution_cell() + sol_cell.source = """ + answer = 7 + """ + test_cell = create_autotest_test_cell() + + test_cell.metadata['nbgrader'] = {'grade': True} + nb = new_notebook() + nb.metadata['kernelspec'] = { + "name": "python3" + } + nb.cells.append(sol_cell) + nb.cells.append(test_cell) + resources = { + 'metadata': {'path': 'nbgrader/docs/source/user_guide/'} + } + nb, resources = preprocessor.preprocess(nb, resources) + executed_nb = NotebookClient(nb=nb).execute() + assert executed_nb['cells'][1]['outputs'][0]['text'] == 'Success!\n' + + def test_float_autotest(self, preprocessor): + sol_cell = create_autotest_solution_cell() + sol_cell.source = """ + answer = 7.7 + """ + test_cell = create_autotest_test_cell() + + test_cell.metadata['nbgrader'] = {'grade': True} + nb = new_notebook() + nb.metadata['kernelspec'] = { + "name": "python3" + } + nb.cells.append(sol_cell) + nb.cells.append(test_cell) + resources = { + 'metadata': {'path': 'nbgrader/docs/source/user_guide/'} + } + nb, resources = preprocessor.preprocess(nb, resources) + executed_nb = NotebookClient(nb=nb).execute() + assert executed_nb['cells'][1]['outputs'][0]['text'] == 'Success!\n' + + def test_string_autotest(self, preprocessor): + sol_cell = create_autotest_solution_cell() + sol_cell.source = """ + answer = 'seven' + """ + test_cell = create_autotest_test_cell() + + test_cell.metadata['nbgrader'] = {'grade': True} + nb = new_notebook() + nb.metadata['kernelspec'] = { + "name": "python3" + } + nb.cells.append(sol_cell) + nb.cells.append(test_cell) + resources = { + 'metadata': {'path': 'nbgrader/docs/source/user_guide/'} + } + nb, resources = preprocessor.preprocess(nb, resources) + executed_nb = NotebookClient(nb=nb).execute() + assert executed_nb['cells'][1]['outputs'][0]['text'] == 'Success!\n' + + def test_list_autotest(self, preprocessor): + sol_cell = create_autotest_solution_cell() + sol_cell.source = """ + answer = [1, 2, 3, 4, 5, 6, 7] + """ + test_cell = create_autotest_test_cell() + + test_cell.metadata['nbgrader'] = {'grade': True} + nb = new_notebook() + nb.metadata['kernelspec'] = { + "name": "python3" + } + nb.cells.append(sol_cell) + nb.cells.append(test_cell) + resources = { + 'metadata': {'path': 'nbgrader/docs/source/user_guide/'} + } + nb, resources = preprocessor.preprocess(nb, resources) + executed_nb = NotebookClient(nb=nb).execute() + assert executed_nb['cells'][1]['outputs'][0]['text'] == 'Success!\n' + + + diff --git a/nbgrader/tests/preprocessors/test_limitoutput.py b/nbgrader/tests/preprocessors/test_limitoutput.py deleted file mode 100644 index 6afed2058..000000000 --- a/nbgrader/tests/preprocessors/test_limitoutput.py +++ /dev/null @@ -1,38 +0,0 @@ -import pytest -import os - -from textwrap import dedent -from ...preprocessors import LimitOutput -from .base import BaseTestPreprocessor -from .. 
import create_code_cell, create_text_cell - - -@pytest.fixture -def preprocessor(): - return LimitOutput() - - -class TestLimitOutput(BaseTestPreprocessor): - - def test_long_output(self): - nb = self._read_nb(os.path.join("files", "long-output.ipynb")) - cell, = nb.cells - output, = cell.outputs - assert len(output.text.split("\n")) > 1000 - - pp = LimitOutput() - nb, resources = pp.preprocess(nb, {}) - - cell, = nb.cells - output, = cell.outputs - assert len(output.text.split("\n")) == 1000 - - def test_infinite_recursion(self): - nb = self._read_nb(os.path.join("files", "infinite-recursion.ipynb")) - - pp = LimitOutput() - nb, resources = pp.preprocess(nb, {}) - - cell, = nb.cells - output, = cell.outputs - assert len(output.traceback) == 100 diff --git a/nbgrader/tests/preprocessors/test_lockcells.py b/nbgrader/tests/preprocessors/test_lockcells.py deleted file mode 100644 index 98a40ebe2..000000000 --- a/nbgrader/tests/preprocessors/test_lockcells.py +++ /dev/null @@ -1,170 +0,0 @@ -import pytest -import itertools -import os - -from ...preprocessors import LockCells -from .base import BaseTestPreprocessor -from .. import create_code_cell - - -@pytest.fixture -def preprocessor(): - return LockCells() - - -class TestLockCells(BaseTestPreprocessor): - - @staticmethod - def deletable(cell): - return cell.metadata.get('deletable', True) - - @staticmethod - def editable(cell): - return cell.metadata.get('editable', True) - - def test_solution_cell_undeletable(self, preprocessor): - """Do solution cells become undeletable?""" - preprocessor.lock_solution_cells = True - preprocessor.lock_grade_cells = False - preprocessor.lock_all_cells = False - preprocessor.lock_readonly_cells = False - cell = create_code_cell() - cell.metadata['nbgrader'] = {} - cell.metadata['nbgrader']['solution'] = True - assert self.deletable(cell) - assert self.editable(cell) - new_cell = preprocessor.preprocess_cell(cell, {}, 0)[0] - assert not self.deletable(new_cell) - assert self.editable(cell) - - def test_solution_cell_unchanged(self, preprocessor): - """Do solution cells remain unchanged?""" - preprocessor.lock_solution_cells = False - preprocessor.lock_grade_cells = False - preprocessor.lock_all_cells = False - preprocessor.lock_readonly_cells = False - cell = create_code_cell() - cell.metadata['nbgrader'] = {} - cell.metadata['nbgrader']['solution'] = True - assert self.deletable(cell) - assert self.editable(cell) - new_cell = preprocessor.preprocess_cell(cell, {}, 0)[0] - assert self.deletable(new_cell) - assert self.editable(cell) - - def test_locked_cell_undeletable(self, preprocessor): - """Do locked cells become undeletable?""" - preprocessor.lock_solution_cells = False - preprocessor.lock_grade_cells = False - preprocessor.lock_all_cells = False - preprocessor.lock_readonly_cells = True - cell = create_code_cell() - cell.metadata['nbgrader'] = {} - cell.metadata['nbgrader']['locked'] = True - assert self.deletable(cell) - assert self.editable(cell) - new_cell = preprocessor.preprocess_cell(cell, {}, 0)[0] - assert not self.deletable(new_cell) - assert not self.editable(cell) - - def test_grade_cell_undeletable(self, preprocessor): - """Do grade cells become undeletable?""" - preprocessor.lock_solution_cells = False - preprocessor.lock_grade_cells = True - preprocessor.lock_all_cells = False - preprocessor.lock_readonly_cells = False - cell = create_code_cell() - cell.metadata['nbgrader'] = {} - cell.metadata['nbgrader']['grade'] = True - assert self.deletable(cell) - assert self.editable(cell) - 
new_cell = preprocessor.preprocess_cell(cell, {}, 0)[0] - assert not self.deletable(new_cell) - assert not self.editable(cell) - - def test_grade_cell_unchanged(self, preprocessor): - """Do grade cells remain unchanged?""" - preprocessor.lock_solution_cells = False - preprocessor.lock_grade_cells = False - preprocessor.lock_all_cells = False - preprocessor.lock_readonly_cells = False - cell = create_code_cell() - cell.metadata['nbgrader'] = {} - cell.metadata['nbgrader']['grade'] = True - assert self.deletable(cell) - assert self.editable(cell) - new_cell = preprocessor.preprocess_cell(cell, {}, 0)[0] - assert self.deletable(new_cell) - assert self.editable(cell) - - def test_grade_and_solution_cell_undeletable(self, preprocessor): - """Do grade and solution cells become undeletable?""" - preprocessor.lock_solution_cells = False - preprocessor.lock_grade_cells = True - preprocessor.lock_all_cells = False - preprocessor.lock_readonly_cells = False - cell = create_code_cell() - cell.metadata['nbgrader'] = {} - cell.metadata['nbgrader']['grade'] = True - cell.metadata['nbgrader']['solution'] = True - assert self.deletable(cell) - assert self.editable(cell) - new_cell = preprocessor.preprocess_cell(cell, {}, 0)[0] - assert not self.deletable(new_cell) - assert self.editable(cell) - - def test_grade_and_solution_cell_unchanged(self, preprocessor): - """Do grade and solution cells remain unchanged?""" - preprocessor.lock_solution_cells = False - preprocessor.lock_grade_cells = False - preprocessor.lock_all_cells = False - preprocessor.lock_readonly_cells = False - cell = create_code_cell() - cell.metadata['nbgrader'] = {} - cell.metadata['nbgrader']['grade'] = True - cell.metadata['nbgrader']['solution'] = True - assert self.deletable(cell) - assert self.editable(cell) - new_cell = preprocessor.preprocess_cell(cell, {}, 0)[0] - assert self.deletable(new_cell) - assert self.editable(cell) - - def test_cell_undeletable(self, preprocessor): - """Do normal cells become undeletable?""" - preprocessor.lock_solution_cells = False - preprocessor.lock_grade_cells = False - preprocessor.lock_all_cells = True - preprocessor.lock_readonly_cells = False - cell = create_code_cell() - cell.metadata['nbgrader'] = {} - assert self.deletable(cell) - assert self.editable(cell) - new_cell = preprocessor.preprocess_cell(cell, {}, 0)[0] - assert not self.deletable(new_cell) - assert not self.editable(cell) - - def test_cell_unchanged(self, preprocessor): - """Do normal cells remain unchanged?""" - preprocessor.lock_solution_cells = False - preprocessor.lock_grade_cells = False - preprocessor.lock_all_cells = False - preprocessor.lock_readonly_cells = False - cell = create_code_cell() - cell.metadata['nbgrader'] = {} - assert self.deletable(cell) - assert self.editable(cell) - new_cell = preprocessor.preprocess_cell(cell, {}, 0)[0] - assert self.deletable(new_cell) - assert self.editable(cell) - - @pytest.mark.parametrize( - "lock_solution_cells, lock_grade_cells, lock_all_cells, lock_readonly_cells", - list(itertools.product([True, False], [True, False], [True, False], [True, False])) - ) - def test_preprocess_nb(self, preprocessor, lock_solution_cells, lock_grade_cells, lock_all_cells, lock_readonly_cells): - """Is the test notebook processed without error?""" - preprocessor.lock_solution_cells = lock_solution_cells - preprocessor.lock_grade_cells = lock_grade_cells - preprocessor.lock_all_cells = lock_all_cells - preprocessor.lock_readonly_cells = lock_readonly_cells - 
preprocessor.preprocess(self._read_nb(os.path.join("files", "test.ipynb")), {}) diff --git a/nbgrader/tests/preprocessors/test_overwritecells.py b/nbgrader/tests/preprocessors/test_overwritecells.py deleted file mode 100644 index 4f768f77c..000000000 --- a/nbgrader/tests/preprocessors/test_overwritecells.py +++ /dev/null @@ -1,267 +0,0 @@ -import pytest - -from nbformat.v4 import new_notebook, new_markdown_cell - -from ...preprocessors import SaveCells, OverwriteCells -from ...api import Gradebook -from ...utils import compute_checksum -from .base import BaseTestPreprocessor -from .. import ( - create_grade_cell, create_solution_cell, create_grade_and_solution_cell, - create_locked_cell, create_task_cell) - - -@pytest.fixture -def preprocessors(): - return (SaveCells(), OverwriteCells()) - - -@pytest.fixture -def gradebook(request, db): - gb = Gradebook(db) - gb.add_assignment("ps0") - - def fin(): - gb.close() - - request.addfinalizer(fin) - - return gb - - -@pytest.fixture -def resources(db, gradebook): - return { - "nbgrader": { - "db_url": db, - "assignment": "ps0", - "notebook": "test" - } - } - - -class TestOverwriteCells(BaseTestPreprocessor): - - def test_overwrite_points(self, preprocessors, resources): - """Are points overwritten for grade cells?""" - cell = create_grade_cell("hello", "code", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - - cell.metadata.nbgrader["points"] = 2 - nb, resources = preprocessors[1].preprocess(nb, resources) - - assert cell.metadata.nbgrader["points"] == 1 - - def test_overwrite_grade_source(self, preprocessors, resources): - """Is the source overwritten for grade cells?""" - cell = create_grade_cell("hello", "code", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - - cell.source = "hello!" - nb, resources = preprocessors[1].preprocess(nb, resources) - - assert cell.source == "hello" - - def test_overwrite_locked_source_code(self, preprocessors, resources): - """Is the source overwritten for locked code cells?""" - cell = create_locked_cell("hello", "code", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - - cell.source = "hello!" - nb, resources = preprocessors[1].preprocess(nb, resources) - - assert cell.source == "hello" - - def test_overwrite_locked_source_markdown(self, preprocessors, resources): - """Is the source overwritten for locked markdown cells?""" - cell = create_locked_cell("hello", "markdown", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - - cell.source = "hello!" - nb, resources = preprocessors[1].preprocess(nb, resources) - - assert cell.source == "hello" - - def test_dont_overwrite_grade_and_solution_source(self, preprocessors, resources): - """Is the source not overwritten for grade+solution cells?""" - cell = create_grade_and_solution_cell("hello", "code", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - - cell.source = "hello!" 
- nb, resources = preprocessors[1].preprocess(nb, resources) - - assert cell.source == "hello!" - - def test_dont_overwrite_solution_source(self, preprocessors, resources): - """Is the source not overwritten for solution cells?""" - cell = create_solution_cell("hello", "code", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - - cell.source = "hello!" - nb, resources = preprocessors[1].preprocess(nb, resources) - - assert cell.source == "hello!" - - def test_overwrite_grade_cell_type(self, preprocessors, resources): - """Is the cell type overwritten for grade cells?""" - cell = create_grade_cell("hello", "code", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - - cell.cell_type = "markdown" - nb, resources = preprocessors[1].preprocess(nb, resources) - - assert cell.cell_type == "code" - - def test_overwrite_solution_cell_type(self, preprocessors, resources): - """Is the cell type overwritten for solution cells?""" - cell = create_solution_cell("hello", "code", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - - cell.cell_type = "markdown" - nb, resources = preprocessors[1].preprocess(nb, resources) - - assert cell.cell_type == "code" - - def test_overwrite_locked_cell_type(self, preprocessors, resources): - """Is the cell type overwritten for locked cells?""" - cell = create_locked_cell("hello", "code", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - - cell.cell_type = "markdown" - nb, resources = preprocessors[1].preprocess(nb, resources) - - assert cell.cell_type == "code" - - def test_overwrite_grade_checksum(self, preprocessors, resources): - """Is the checksum overwritten for grade cells?""" - cell = create_grade_cell("hello", "code", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - - cell.metadata.nbgrader["checksum"] = "1234" - nb, resources = preprocessors[1].preprocess(nb, resources) - - assert cell.metadata.nbgrader["checksum"] == compute_checksum(cell) - - def test_overwrite_solution_checksum(self, preprocessors, resources): - """Is the checksum overwritten for solution cells?""" - cell = create_solution_cell("hello", "code", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - - cell.metadata.nbgrader["checksum"] = "1234" - nb, resources = preprocessors[1].preprocess(nb, resources) - - assert cell.metadata.nbgrader["checksum"] == compute_checksum(cell) - - def test_overwrite_locked_checksum(self, preprocessors, resources): - """Is the checksum overwritten for locked cells?""" - cell = create_locked_cell("hello", "code", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - - cell.metadata.nbgrader["checksum"] = "1234" - nb, resources = preprocessors[1].preprocess(nb, resources) - - assert 
cell.metadata.nbgrader["checksum"] == compute_checksum(cell) - - def test_nonexistent_grade_id(self, preprocessors, resources): - """Are cells not in the database ignored?""" - cell = create_grade_cell("", "code", "", 1) - cell.metadata.nbgrader['grade'] = False - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - nb, resources = preprocessors[1].preprocess(nb, resources) - assert 'grade_id' not in cell.metadata.nbgrader - - cell = create_grade_cell("", "code", "foo", 1) - cell.metadata.nbgrader['grade'] = False - nb = new_notebook() - nb.cells.append(cell) - nb, resources = preprocessors[0].preprocess(nb, resources) - nb, resources = preprocessors[1].preprocess(nb, resources) - assert 'grade_id' not in cell.metadata.nbgrader - - # Tests for adding missing cells back - def test_add_missing_cells(self, preprocessors, resources): - """ - Note: This test will produce warnings (from OverwriteCells preprocessor) by default. - Current implementation of adding missing cells should: - - add missing cells right after the previous grade/solution cell, as the best approximation of their location - - add task cells at the end (because we can't detect their location in the notebook), in order of appearance - """ - - cells = [ - create_solution_cell("Code assignment", "code", "code_solution"), - create_grade_cell("some tests", "code", "code_test1", 1), - create_grade_cell("more tests", "code", "code_test2", 1), - new_markdown_cell("some description"), - create_grade_and_solution_cell("write answer here", "markdown", "md_manually_graded1", 1), - create_grade_and_solution_cell("write answer here", "markdown", "md_manually_graded2", 1), - new_markdown_cell("some description"), - create_task_cell("some task description", "markdown", "task_cell1", 1), - new_markdown_cell("some description"), - create_task_cell("some task description", "markdown", "task_cell2", 1), - ] - # Add checksums to suppress warning - nbgrader_cells = [0, 1, 2, 4, 5, 7, 9] - for idx, cell in enumerate(cells): - if idx in nbgrader_cells: - cell.metadata.nbgrader["checksum"] = compute_checksum(cell) - - expected_order = [0, 1, 2, 4, 5, 3, 6, 8, 7, 9] - expected = [cells[i].metadata.nbgrader["grade_id"] if "nbgrader" in cells[i].metadata else "markdown" for i in expected_order] - - nb = new_notebook() - nb.cells = cells - - # save to database - nb, resources = preprocessors[0].preprocess(nb, resources) - - # remove grade/task cells to test their restoration - nb.cells.pop(9) - nb.cells.pop(7) - nb.cells.pop(5) - nb.cells.pop(4) - nb.cells.pop(2) - nb.cells.pop(1) - - # restore - preprocessors[1].add_missing_cells = True - nb, resources = preprocessors[1].preprocess(nb, resources) - result = [cell["metadata"]["nbgrader"]["grade_id"] if "nbgrader" in cell["metadata"] else "markdown" for cell in nb.cells] - assert expected == result diff --git a/nbgrader/tests/preprocessors/test_overwritekernelspec.py b/nbgrader/tests/preprocessors/test_overwritekernelspec.py deleted file mode 100644 index de97c13da..000000000 --- a/nbgrader/tests/preprocessors/test_overwritekernelspec.py +++ /dev/null @@ -1,59 +0,0 @@ -import json -import pytest - -from nbformat import validate -from nbformat.v4 import new_notebook - -from ...preprocessors import SaveCells, OverwriteKernelspec -from ...api import Gradebook -from .base import BaseTestPreprocessor - - -@pytest.fixture -def preprocessors(): - return (SaveCells(), OverwriteKernelspec()) - - -@pytest.fixture -def gradebook(request, db): - gb = Gradebook(db) 
- gb.add_assignment("ps0") - - def fin(): - gb.close() - request.addfinalizer(fin) - - return gb - - -@pytest.fixture -def resources(db, gradebook): - return { - "nbgrader": { - "db_url": db, - "assignment": "ps0", - "notebook": "test" - } - } - - -class TestOverwriteKernelSpec(BaseTestPreprocessor): - - def test_overwrite_kernelspec(self, preprocessors, resources, gradebook): - kernelspec = dict( - display_name='blarg', - name='python3', - language='python', - ) - - nb = new_notebook() - nb.metadata['kernelspec'] = kernelspec - nb, resources = preprocessors[0].preprocess(nb, resources) - - nb.metadata['kernelspec'] = {} - nb, resources = preprocessors[1].preprocess(nb, resources) - - validate(nb) - notebook = gradebook.find_notebook("test", "ps0") - assert nb.metadata['kernelspec'] == kernelspec - assert json.loads(notebook.kernelspec) == kernelspec diff --git a/nbgrader/tests/preprocessors/test_saveautogrades.py b/nbgrader/tests/preprocessors/test_saveautogrades.py deleted file mode 100644 index 2b51be838..000000000 --- a/nbgrader/tests/preprocessors/test_saveautogrades.py +++ /dev/null @@ -1,216 +0,0 @@ -import pytest - -from nbformat.v4 import new_notebook, new_output - -from ...preprocessors import SaveCells, SaveAutoGrades -from ...api import Gradebook -from ...utils import compute_checksum -from .base import BaseTestPreprocessor -from .. import ( - create_grade_cell, create_grade_and_solution_cell, create_solution_cell) - - -@pytest.fixture -def preprocessors(): - return (SaveCells(), SaveAutoGrades()) - - -@pytest.fixture -def gradebook(request, db): - gb = Gradebook(db) - gb.add_assignment("ps0") - gb.add_student("bar") - - def fin(): - gb.close() - request.addfinalizer(fin) - - return gb - - -@pytest.fixture -def resources(db): - return { - "nbgrader": { - "db_url": db, - "assignment": "ps0", - "notebook": "test", - "student": "bar" - } - } - - -class TestSaveAutoGrades(BaseTestPreprocessor): - - def test_grade_correct_code(self, preprocessors, gradebook, resources): - """Is a passing code cell correctly graded?""" - cell = create_grade_cell("hello", "code", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - preprocessors[1].preprocess(nb, resources) - - grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar") - assert grade_cell.score == 1 - assert grade_cell.max_score == 1 - assert grade_cell.auto_score == 1 - assert grade_cell.manual_score == None - assert not grade_cell.needs_manual_grade - - def test_grade_incorrect_code(self, preprocessors, gradebook, resources): - """Is a failing code cell correctly graded?""" - cell = create_grade_cell("hello", "code", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - cell.outputs = [new_output('error', ename="NotImplementedError", evalue="", traceback=["error"])] - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - preprocessors[1].preprocess(nb, resources) - - grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar") - assert grade_cell.score == 0 - assert grade_cell.max_score == 1 - assert grade_cell.auto_score == 0 - assert grade_cell.manual_score == None - assert not grade_cell.needs_manual_grade - - def test_grade_unchanged_markdown(self, preprocessors, gradebook, resources): - """Is an unchanged markdown cell correctly graded?""" - cell = 
create_grade_and_solution_cell("hello", "markdown", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - preprocessors[1].preprocess(nb, resources) - - grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar") - assert grade_cell.score == 0 - assert grade_cell.max_score == 1 - assert grade_cell.auto_score == 0 - assert grade_cell.manual_score == None - assert not grade_cell.needs_manual_grade - - def test_grade_changed_markdown(self, preprocessors, gradebook, resources): - """Is a changed markdown cell correctly graded?""" - cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - cell.source = "hello!" - preprocessors[1].preprocess(nb, resources) - - grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar") - assert grade_cell.score == 0 - assert grade_cell.max_score == 1 - assert grade_cell.auto_score == None - assert grade_cell.manual_score == None - assert grade_cell.needs_manual_grade - - def test_comment_unchanged_code(self, preprocessors, gradebook, resources): - """Is an unchanged code cell given the correct comment?""" - cell = create_solution_cell("hello", "code", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - preprocessors[1].preprocess(nb, resources) - - comment = gradebook.find_comment("foo", "test", "ps0", "bar") - assert comment.auto_comment == "No response." - - def test_comment_changed_code(self, preprocessors, gradebook, resources): - """Is a changed code cell given the correct comment?""" - cell = create_solution_cell("hello", "code", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - cell.source = "hello!" - preprocessors[1].preprocess(nb, resources) - - comment = gradebook.find_comment("foo", "test", "ps0", "bar") - assert comment.auto_comment is None - - def test_comment_unchanged_markdown(self, preprocessors, gradebook, resources): - """Is an unchanged markdown cell given the correct comment?""" - cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - preprocessors[1].preprocess(nb, resources) - - comment = gradebook.find_comment("foo", "test", "ps0", "bar") - assert comment.auto_comment == "No response." - - def test_comment_changed_markdown(self, preprocessors, gradebook, resources): - """Is a changed markdown cell given the correct comment?""" - cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - cell.source = "hello!" 
- preprocessors[1].preprocess(nb, resources) - - comment = gradebook.find_comment("foo", "test", "ps0", "bar") - assert comment.auto_comment is None - - def test_grade_existing_manual_grade(self, preprocessors, gradebook, resources): - """Is a failing code cell correctly graded?""" - cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - cell.source = "hello!" - preprocessors[1].preprocess(nb, resources) - - grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar") - assert grade_cell.score == 0 - assert grade_cell.max_score == 1 - assert grade_cell.auto_score == None - assert grade_cell.manual_score == None - assert grade_cell.needs_manual_grade - - grade_cell.manual_score = 1 - grade_cell.needs_manual_grade = False - gradebook.db.commit() - - preprocessors[1].preprocess(nb, resources) - - grade_cell = gradebook.find_grade("foo", "test", "ps0", "bar") - assert grade_cell.score == 1 - assert grade_cell.max_score == 1 - assert grade_cell.auto_score == None - assert grade_cell.manual_score == 1 - assert grade_cell.needs_manual_grade - - def test_grade_existing_auto_comment(self, preprocessors, gradebook, resources): - """Is a failing code cell correctly graded?""" - cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - preprocessors[0].preprocess(nb, resources) - gradebook.add_submission("ps0", "bar") - preprocessors[1].preprocess(nb, resources) - - comment = gradebook.find_comment("foo", "test", "ps0", "bar") - assert comment.auto_comment == "No response." - - nb.cells[-1].source = 'goodbye' - preprocessors[1].preprocess(nb, resources) - - gradebook.db.refresh(comment) - assert comment.auto_comment is None diff --git a/nbgrader/tests/preprocessors/test_savecells.py b/nbgrader/tests/preprocessors/test_savecells.py deleted file mode 100644 index 11ff7e9f5..000000000 --- a/nbgrader/tests/preprocessors/test_savecells.py +++ /dev/null @@ -1,311 +0,0 @@ -import json -import pytest - -from nbformat import validate -from nbformat.v4 import new_notebook - -from ...preprocessors import SaveCells -from ...api import Gradebook -from ...utils import compute_checksum -from .base import BaseTestPreprocessor -from .. 
import ( - create_grade_cell, create_solution_cell, create_grade_and_solution_cell, - create_locked_cell) - - -@pytest.fixture -def preprocessor(): - return SaveCells() - - -@pytest.fixture -def gradebook(request, db): - gb = Gradebook(db) - gb.add_assignment("ps0") - - def fin(): - gb.close() - request.addfinalizer(fin) - - return gb - - -@pytest.fixture -def resources(db, gradebook): - return { - "nbgrader": { - "db_url": db, - "assignment": "ps0", - "notebook": "test", - } - } - - -class TestSaveCells(BaseTestPreprocessor): - - def test_save_code_grade_cell(self, preprocessor, gradebook, resources): - cell = create_grade_cell("hello", "code", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - - nb, resources = preprocessor.preprocess(nb, resources) - - grade_cell = gradebook.find_grade_cell("foo", "test", "ps0") - assert grade_cell.max_score == 1 - assert grade_cell.cell_type == "code" - - source_cell = gradebook.find_source_cell("foo", "test", "ps0") - assert source_cell.source == "hello" - assert source_cell.checksum == cell.metadata.nbgrader["checksum"] - assert source_cell.cell_type == "code" - assert source_cell.locked - - def test_save_code_solution_cell(self, preprocessor, gradebook, resources): - cell = create_solution_cell("hello", "code", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - - nb, resources = preprocessor.preprocess(nb, resources) - - gradebook.find_solution_cell("foo", "test", "ps0") - - source_cell = gradebook.find_source_cell("foo", "test", "ps0") - assert source_cell.source == "hello" - assert source_cell.checksum == cell.metadata.nbgrader["checksum"] - assert source_cell.cell_type == "code" - assert not source_cell.locked - - def test_save_markdown_solution_cell(self, preprocessor, gradebook, resources): - cell = create_solution_cell("hello", "markdown", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - - nb, resources = preprocessor.preprocess(nb, resources) - - gradebook.find_solution_cell("foo", "test", "ps0") - - source_cell = gradebook.find_source_cell("foo", "test", "ps0") - assert source_cell.source == "hello" - assert source_cell.checksum == cell.metadata.nbgrader["checksum"] - assert source_cell.cell_type == "markdown" - assert not source_cell.locked - - def test_save_code_grade_and_solution_cell(self, preprocessor, gradebook, resources): - cell = create_grade_and_solution_cell("hello", "code", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - - nb, resources = preprocessor.preprocess(nb, resources) - - grade_cell = gradebook.find_grade_cell("foo", "test", "ps0") - assert grade_cell.max_score == 1 - assert grade_cell.cell_type == "code" - - gradebook.find_solution_cell("foo", "test", "ps0") - - source_cell = gradebook.find_source_cell("foo", "test", "ps0") - assert source_cell.source == "hello" - assert source_cell.checksum == cell.metadata.nbgrader["checksum"] - assert source_cell.cell_type == "code" - assert not source_cell.locked - - def test_save_markdown_grade_and_solution_cell(self, preprocessor, gradebook, resources): - cell = create_grade_and_solution_cell("hello", "markdown", "foo", 1) - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - - nb, resources = preprocessor.preprocess(nb, resources) - - grade_cell = 
gradebook.find_grade_cell("foo", "test", "ps0") - assert grade_cell.max_score == 1 - assert grade_cell.cell_type == "markdown" - - gradebook.find_solution_cell("foo", "test", "ps0") - - source_cell = gradebook.find_source_cell("foo", "test", "ps0") - assert source_cell.source == "hello" - assert source_cell.checksum == cell.metadata.nbgrader["checksum"] - assert source_cell.cell_type == "markdown" - assert not source_cell.locked - - def test_save_locked_code_cell(self, preprocessor, gradebook, resources): - cell = create_locked_cell("hello", "code", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - - nb, resources = preprocessor.preprocess(nb, resources) - - source_cell = gradebook.find_source_cell("foo", "test", "ps0") - assert source_cell.source == "hello" - assert source_cell.checksum == cell.metadata.nbgrader["checksum"] - assert source_cell.cell_type == "code" - assert source_cell.locked - - def test_save_locked_markdown_cell(self, preprocessor, gradebook, resources): - cell = create_locked_cell("hello", "markdown", "foo") - cell.metadata.nbgrader['checksum'] = compute_checksum(cell) - nb = new_notebook() - nb.cells.append(cell) - - nb, resources = preprocessor.preprocess(nb, resources) - - source_cell = gradebook.find_source_cell("foo", "test", "ps0") - assert source_cell.source == "hello" - assert source_cell.checksum == cell.metadata.nbgrader["checksum"] - assert source_cell.cell_type == "markdown" - assert source_cell.locked - - def test_save_new_cell(self, preprocessor, gradebook, resources): - cell1 = create_grade_and_solution_cell("hello", "markdown", "foo", 2) - cell2 = create_grade_and_solution_cell("hello", "markdown", "bar", 1) - - nb = new_notebook() - nb.cells.append(cell1) - nb, resources = preprocessor.preprocess(nb, resources) - - notebook = gradebook.find_notebook("test", "ps0") - assert len(notebook.grade_cells) == 1 - assert len(notebook.solution_cells) == 1 - assert len(notebook.source_cells) == 1 - - nb.cells.append(cell2) - nb, resources = preprocessor.preprocess(nb, resources) - - gradebook.db.refresh(notebook) - assert len(notebook.grade_cells) == 2 - assert len(notebook.solution_cells) == 2 - assert len(notebook.source_cells) == 2 - - def test_save_new_cell_with_submissions(self, preprocessor, gradebook, resources): - cell1 = create_grade_and_solution_cell("hello", "markdown", "foo", 2) - cell2 = create_grade_and_solution_cell("hello", "markdown", "bar", 1) - - nb = new_notebook() - nb.cells.append(cell1) - nb, resources = preprocessor.preprocess(nb, resources) - - notebook = gradebook.find_notebook("test", "ps0") - assert len(notebook.grade_cells) == 1 - assert len(notebook.solution_cells) == 1 - assert len(notebook.source_cells) == 1 - - gradebook.add_student("hacker123") - gradebook.add_submission("ps0", "hacker123") - nb.cells.append(cell2) - - with pytest.raises(RuntimeError): - nb, resources = preprocessor.preprocess(nb, resources) - - def test_remove_cell(self, preprocessor, gradebook, resources): - cell1 = create_grade_and_solution_cell("hello", "markdown", "foo", 2) - cell2 = create_grade_and_solution_cell("hello", "markdown", "bar", 1) - - nb = new_notebook() - nb.cells.append(cell1) - nb.cells.append(cell2) - nb, resources = preprocessor.preprocess(nb, resources) - - notebook = gradebook.find_notebook("test", "ps0") - assert len(notebook.grade_cells) == 2 - assert len(notebook.solution_cells) == 2 - assert len(notebook.source_cells) == 2 - - nb.cells = nb.cells[:-1] - nb, resources = 
preprocessor.preprocess(nb, resources) - - gradebook.db.refresh(notebook) - assert len(notebook.grade_cells) == 1 - assert len(notebook.solution_cells) == 1 - assert len(notebook.source_cells) == 1 - - def test_remove_cell_with_submissions(self, preprocessor, gradebook, resources): - cell1 = create_grade_and_solution_cell("hello", "markdown", "foo", 2) - cell2 = create_grade_and_solution_cell("hello", "markdown", "bar", 1) - - nb = new_notebook() - nb.cells.append(cell1) - nb.cells.append(cell2) - nb, resources = preprocessor.preprocess(nb, resources) - - notebook = gradebook.find_notebook("test", "ps0") - assert len(notebook.grade_cells) == 2 - assert len(notebook.solution_cells) == 2 - assert len(notebook.source_cells) == 2 - - gradebook.add_student("hacker123") - gradebook.add_submission("ps0", "hacker123") - nb.cells = nb.cells[:-1] - - with pytest.raises(RuntimeError): - nb, resources = preprocessor.preprocess(nb, resources) - - def test_modify_cell(self, preprocessor, gradebook, resources): - nb = new_notebook() - nb.cells.append(create_grade_and_solution_cell("hello", "markdown", "foo", 2)) - nb, resources = preprocessor.preprocess(nb, resources) - - notebook = gradebook.find_notebook("test", "ps0") - grade_cell = gradebook.find_grade_cell("foo", "test", "ps0") - solution_cell = gradebook.find_solution_cell("foo", "test", "ps0") - source_cell = gradebook.find_source_cell("foo", "test", "ps0") - assert grade_cell.max_score == 2 - assert source_cell.source == "hello" - - nb.cells[-1] = create_grade_and_solution_cell("goodbye", "markdown", "foo", 1) - nb, resources = preprocessor.preprocess(nb, resources) - - gradebook.db.refresh(notebook) - gradebook.db.refresh(grade_cell) - gradebook.db.refresh(solution_cell) - gradebook.db.refresh(source_cell) - assert grade_cell.max_score == 1 - assert source_cell.source == "goodbye" - - def test_modify_cell_with_submissions(self, preprocessor, gradebook, resources): - nb = new_notebook() - nb.cells.append(create_grade_and_solution_cell("hello", "markdown", "foo", 2)) - nb, resources = preprocessor.preprocess(nb, resources) - - notebook = gradebook.find_notebook("test", "ps0") - grade_cell = gradebook.find_grade_cell("foo", "test", "ps0") - solution_cell = gradebook.find_solution_cell("foo", "test", "ps0") - source_cell = gradebook.find_source_cell("foo", "test", "ps0") - assert grade_cell.max_score == 2 - assert source_cell.source == "hello" - - gradebook.add_student("hacker123") - submission = gradebook.add_submission("ps0", "hacker123").notebooks[0] - assert len(notebook.submissions) == 1 - - nb.cells[-1] = create_grade_and_solution_cell("goodbye", "markdown", "foo", 1) - nb, resources = preprocessor.preprocess(nb, resources) - - gradebook.db.refresh(notebook) - gradebook.db.refresh(submission) - gradebook.db.refresh(grade_cell) - gradebook.db.refresh(solution_cell) - gradebook.db.refresh(source_cell) - assert len(notebook.submissions) == 1 - assert grade_cell.max_score == 1 - assert source_cell.source == "goodbye" - - def test_save_kernelspec(self, preprocessor, gradebook, resources): - kernelspec = dict( - display_name='blarg', - name='python3', - language='python', - ) - - nb = new_notebook() - nb.metadata['kernelspec'] = kernelspec - nb, resources = preprocessor.preprocess(nb, resources) - - validate(nb) - notebook = gradebook.find_notebook("test", "ps0") - assert json.loads(notebook.kernelspec) == kernelspec diff --git a/nbgrader/tests/ui-tests/assignment_list.spec.ts b/nbgrader/tests/ui-tests/assignment_list.spec.ts index 
87ae7d702..b54d4ca83 100644 --- a/nbgrader/tests/ui-tests/assignment_list.spec.ts +++ b/nbgrader/tests/ui-tests/assignment_list.spec.ts @@ -124,6 +124,7 @@ const addCourses = async (request: APIRequestContext, tmpPath: string) => { `${tmpPath}/source/Problem Set 1/problem2.ipynb`, `${tmpPath}/source/Problem Set 1/Problem 2.ipynb` ); + await contents.createDirectory(`${tmpPath}/source/ps.01`); await contents.uploadFile( path.resolve(__dirname, "files", "empty.ipynb"), diff --git a/pyproject.toml b/pyproject.toml index d52bfeeba..a5c1c9156 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,6 +47,7 @@ dependencies = [ "rapidfuzz>=1.8", "requests>=2.26", "sqlalchemy>=1.4,<3", + "PyYAML>=6.0", ] version = "0.9.0a1"
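A minimal sketch of the workflow that the new test_instantiatetests.py above exercises, reproduced outside pytest. This is a sketch only, under three assumptions: a python3 kernel is installed; the path passed via resources contains the autotests.yml template shipped with the user guide; and "### AUTOTEST answer" is a valid directive (the tests build their cells with internal helpers such as create_autotest_test_cell, so plain nbformat cells stand in for them here).

    # Sketch only: mirrors test_replace_autotest_code from the new test module.
    from nbformat.v4 import new_notebook, new_code_cell
    from nbgrader.preprocessors import InstantiateTests

    nb = new_notebook()
    nb.metadata['kernelspec'] = {"name": "python3"}

    sol_cell = new_code_cell("answer = 7")            # instructor solution
    test_cell = new_code_cell("### AUTOTEST answer")  # directive to instantiate (assumed syntax)
    test_cell.metadata['nbgrader'] = {'grade': True}  # required: AUTOTEST in a non-grade cell raises
    nb.cells.extend([sol_cell, test_cell])

    # InstantiateTests starts a kernel in resources['metadata']['path'];
    # that directory must contain the autotests.yml template file.
    resources = {'metadata': {'path': 'nbgrader/docs/source/user_guide/'}}
    nb, resources = InstantiateTests().preprocess(nb, resources)

    # the directive has been replaced by generated assert statements
    assert 'assert' in nb.cells[1].source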
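Relatedly, the PyYAML>=6.0 dependency added to pyproject.toml above supports parsing that template file. A sketch of inspecting it directly, assuming the user-guide copy of autotests.yml referenced throughout the new tests:

    # Sketch only: load the autotest template file with the new PyYAML dependency.
    import yaml

    with open('nbgrader/docs/source/user_guide/autotests.yml') as f:
        templates = yaml.safe_load(f)

    # test_load_test_template_file asserts that fields such as dispatch_template,
    # success_code, and setup_code are populated after this file is loaded.
    print(sorted(templates))  # top-level keys (assuming a top-level mapping)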