diff --git a/.github/workflows/spellchecker.yml b/.github/workflows/spellchecker.yml new file mode 100644 index 000000000..2b0309af4 --- /dev/null +++ b/.github/workflows/spellchecker.yml @@ -0,0 +1,186 @@ +name: Check Spelling + +# Comment management is handled through a secondary job, for details see: +# https://github.com/check-spelling/check-spelling/wiki/Feature%3A-Restricted-Permissions +# +# `jobs.comment-push` runs when a push is made to a repository and the `jobs.spelling` job needs to make a comment +# (in odd cases, it might actually run just to collapse a comment, but that's fairly rare) +# it needs `contents: write` in order to add a comment. +# +# `jobs.comment-pr` runs when a pull_request is made to a repository and the `jobs.spelling` job needs to make a comment +# or collapse a comment (in the case where it had previously made a comment and now no longer needs to show a comment) +# it needs `pull-requests: write` in order to manipulate those comments. + +# Updating pull request branches is managed via comment handling. +# For details, see: https://github.com/check-spelling/check-spelling/wiki/Feature:-Update-expect-list +# +# These elements work together to make it happen: +# +# `on.issue_comment` +# This event listens to comments by users asking to update the metadata. +# +# `jobs.update` +# This job runs in response to an issue_comment and will push a new commit +# to update the spelling metadata. +# +# `with.experimental_apply_changes_via_bot` +# Tells the action to support and generate messages that enable it +# to make a commit to update the spelling metadata. +# +# `with.ssh_key` +# In order to trigger workflows when the commit is made, you can provide a +# secret (typically, a write-enabled github deploy key). +# +# For background, see: https://github.com/check-spelling/check-spelling/wiki/Feature:-Update-with-deploy-key + +# Sarif reporting +# +# Access to Sarif reports is generally restricted (by GitHub) to members of the repository. 
+# +# Requires enabling `security-events: write` +# and configuring the action with `use_sarif: 1` +# +# For information on the feature, see: https://github.com/check-spelling/check-spelling/wiki/Feature:-Sarif-output + +# Minimal workflow structure: +# +# on: +# push: +# ... +# pull_request_target: +# ... +# jobs: +# # you only want the spelling job, all others should be omitted +# spelling: +# # remove `security-events: write` and `use_sarif: 1` +# # remove `experimental_apply_changes_via_bot: 1` +# ... otherwise adjust the `with:` as you wish + +on: + push: + branches: + - "**" + tags-ignore: + - "**" + pull_request_target: + branches: + - "**" + types: + - 'opened' + - 'reopened' + - 'synchronize' + issue_comment: + types: + - 'created' + +jobs: + spelling: + name: Check Spelling + permissions: + contents: read + pull-requests: read + actions: read + security-events: write + outputs: + followup: ${{ steps.spelling.outputs.followup }} + runs-on: ubuntu-latest + if: ${{ contains(github.event_name, 'pull_request') || github.event_name == 'push' }} + concurrency: + group: spelling-${{ github.event.pull_request.number || github.ref }} + # note: If you use only_check_changed_files, you do not want cancel-in-progress + cancel-in-progress: true + steps: + - name: check-spelling + id: spelling + uses: check-spelling/check-spelling@main + with: + config: .spelling + checkout: true + check_file_names: 1 + spell_check_this: check-spelling/spell-check-this@prerelease + post_comment: 0 + use_magic_file: 1 + report-timing: 1 + warnings: bad-regex,binary-file,deprecated-feature,large-file,limited-references,no-newline-at-eof,noisy-file,non-alpha-in-dictionary,token-is-substring,unexpected-line-ending,whitespace-in-dictionary,minified-file,unsupported-configuration,no-files-to-check + experimental_apply_changes_via_bot: 1 + use_sarif: ${{ (!github.event.pull_request || (github.event.pull_request.head.repo.full_name == github.repository)) && 1 }} + extra_dictionary_limit: 20 + 
extra_dictionaries: + cspell:software-terms/dict/softwareTerms.txt + cspell:python/src/python/python-lib.txt + cspell:python/src/python/python.txt + cspell:python/src/common/extra.txt + cspell:php/dict/php.txt + cspell:r/src/r.txt + cspell:aws/aws.txt + cspell:django/dict/django.txt + cspell:filetypes/filetypes.txt + cspell:node/dict/node.txt + cspell:golang/dict/go.txt + cspell:fullstack/dict/fullstack.txt + cspell:java/src/java.txt + cspell:k8s/dict/k8s.txt + cspell:css/dict/css.txt + cspell:npm/dict/npm.txt + cspell:latex/dict/latex.txt + cspell:latex/samples/sample-words.txt + cspell:html-symbol-entities/entities.txt + cspell:html/dict/html.txt + cspell:cpp/src/ecosystem.txt + cspell:mnemonics/src/mnemonics.txt + + comment-push: + name: Report (Push) + # If your workflow isn't running on push, you can remove this job + runs-on: ubuntu-latest + needs: spelling + permissions: + contents: write + if: (success() || failure()) && needs.spelling.outputs.followup && github.event_name == 'push' + steps: + - name: comment + uses: check-spelling/check-spelling@main + with: + checkout: true + spell_check_this: check-spelling/spell-check-this@prerelease + task: ${{ needs.spelling.outputs.followup }} + + comment-pr: + name: Report (PR) + # If your workflow isn't running on pull_request*, you can remove this job + runs-on: ubuntu-latest + needs: spelling + permissions: + contents: read + pull-requests: write + if: (success() || failure()) && needs.spelling.outputs.followup && contains(github.event_name, 'pull_request') + steps: + - name: comment + uses: check-spelling/check-spelling@main + with: + checkout: true + spell_check_this: check-spelling/spell-check-this@prerelease + task: ${{ needs.spelling.outputs.followup }} + experimental_apply_changes_via_bot: 1 + + update: + name: Update PR + permissions: + contents: write + pull-requests: write + actions: read + runs-on: ubuntu-latest + if: ${{ + github.event_name == 'issue_comment' && + github.event.issue.pull_request && + 
contains(github.event.comment.body, '@check-spelling-bot apply')}} + concurrency: + group: spelling-update-${{ github.event.issue.number }} + cancel-in-progress: false + steps: + - name: apply spelling updates + uses: check-spelling/check-spelling@main + with: + experimental_apply_changes_via_bot: 1 + checkout: true + ssh_key: "${{ secrets.CHECK_SPELLING }}" \ No newline at end of file diff --git a/.gitignore b/.gitignore index d40b4a5df..33a03d0ed 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ testing/config_segmentation_temp.yaml testing/failures.log coverage.xml mlcube/workspace/* +!.spelling/.spelling/expect.txt !mlcube/workspace/config.yml !mlcube/workspace/channelIDs.yml tutorials/classification_medmnist_notebook/dataset/pathmnist diff --git a/.spelling/.spelling/expect.txt b/.spelling/.spelling/expect.txt new file mode 100644 index 000000000..fe6a792b1 --- /dev/null +++ b/.spelling/.spelling/expect.txt @@ -0,0 +1,735 @@ +Abhishek +Abousamra +acdfbac +acsconv +adadelta +adagrad +adamax +adamw +addcdiv +addcmul +addgroup +addoption +ademamix +agc +agni +Aimilia +aimiliag +albumentations +allclose +allcontributors +allsigned +amsgrad +Anirban +anonymization +applyaugs +apptainer +Aqubvel +arange +archs +arcname +argmax +argwhere +Arnout +arxiv +asarray +astype +atleast +augs +auroc +autobuild +autocast +autodetermined +auxilary +avgs +awaa +Babak +bacf +backprop +backpropagate +backpropagation +Baheti +Baid +Bakas +Bashyam +batchnorm +bdfc +beggining +bgr +Bhalerao +bibtex +bincount +biomedicalimaging +Bjoern +blabla +brahim +brainage +Brox +btw +Buildx +BVd +BVpye +capsys +cbica +cbig +cca +CCE +cdc +cdir +cel +cencoder +centercrop +cff +Chitalia +christos +Chunrui +Chv +cla +classif +classitk +codacy +codecov +CODEOWNERS +codeql +codereview +codespace +Colab +colorconv +colorjitter +colorlog +commandline +configfile +configgenerator +convs +cooldown +cosineannealing +cosineannealinglr +cosinesimilarity +croporpad +cropzero +ctc +CUBLAS +cudnn 
+cycliclr +datacenter +dataframe +dataprep +datestampnow +Davatzikos +dcce +dclog +dcm +dcmtk +deac +deadsnakes +DEBIAN +deconv +deepresunet +Deepthi +deepunet +denseblock +denselayer +densenet +depthconv +Despina +despinak +devcontainer +dfu +dicelog +dicom +dicomanonymizer +digestpath +disaggregating +discussioncomment +distilbert +DLF +DNN +dockerfiles +dockerized +dockertag +doi +Dokania +dotnet +downsamp +downsample +downsampling +doxygen +dpn +dqbm +dropna +dtype +dynunet +edac +edt +edu +eep +efc +efficientnet +efficientnetb +EIuqemz +elementwise +embeddings +Emre +ener +entrypoints +Ethem +excepthook +exctype +exponentiallr +fcn +Fdocker +fepegar +figsize +filenaming +filereader +fillna +finetuned +flaim +fnull +frgfm +fromarray +fromiter +Fsegmentation +Fulcio +Fworkflows +gandlf +Gastounioti +gbp +gcam +gcampp +GDCM +gdown +gdp +gelu +geometricanirban +Getka +getoption +getsizeof +ggcam +ghc +gle +glx +Gonz +Gotkowski +gpu +Grenko +gridaggregator +gridsampler +groundtruth +Guley +gumbel +Haghighi +Hamamc +Hamamci +hausdorff +healper +heatmaps +hexops +hft +histo +histopath +holocron +hookwrapper +HOUNSFIELD +hpc +hstack +HTR +huggingface +hyperparameters +idxs +ihc +iloc +imagenet +imbalanced +imread +imsave +imshow +imwrite +indeces +inlinehilite +inp +inputdata +instancenorm +interp +ISBI +issn +itcr +iterrows +itk +itkimage +itksnap +jaccard +JAX +JBHI +JDu +JSTARS +Junwen +jupyter +Jupyterlab +kaiming +kakumarabhishek +Karargyris +Karkada +keepdim +keleshev +kenshohara +KFold +kickstart +kld +Kontos +ksel +kspace +Kullback +Kurc +labelsample +labelsampler +lambd +layerchange +Lbtnaq +ldir +leakyrelu +Leibler +levelname +levelno +libgl +libjpeg +libpython +libsm +libvips +libxext +lightresunet +lightunet +linalg +linenums +lineplot +linspace +linting +lly +logit +logpt +logsoftmax +logvar +longreprtext +lps +lrelu +LROn +lstsq +lucidrains +macenko +mainrun +makereport +mathews +matplotlib +matthews +maxpool +mbergman +mcc +mcclog +MCD +mcr +MCT +mde 
+mdmc +medcam +medmnist +medperf +medpy +Megh +mencoder +menze +metr +miccai +missingprediction +mkdocs +mlco +mlcommons +mlcube +mlcubedir +mlp +modeified +modelbase +modelcard +modeldir +modelio +monai +Mouchtaris +moveaxis +mpp +mps +mri +msa +mscadocs +msdnet +mse +msle +Mukhopadhyay +multiclass +multidim +multilabel +mytagname +nadam +nans +naveenk +ncbi +ncc +ndarray +ndexbio +ndim +ndimage +ndlf +nesterov +neuroimage +nfnets +nibabel +nicl +NIf +nifti +nih +nii +nlabel +nnf +nonroot +normtype +notsigned +novograd +nsd +nuitka +numel +numlay +nvidia +octicons +offis +OFWCPDRE +ohif +onefile +onlatest +onnx +openfl +openslide +opensource +openvino +opm +opset +Orhun +ossar +outconv +outputdir +outputfile +palletsprojects +Panchumarthy +pathmnist +pati +pbar +pchs +Pdocker +pearson +Phenomics +pkl +plt +pmwiki +pnas +Prashant +prcomment +predmask +preds +probs +Prunner +prv +psnr +psutil +pth +PTk +pubmed +purelib +pwadry +pydantic +pydicom +pyinstaller +pymdownx +pypa +pyplot +pytorch +pyversion +qsub +qubvel +radam +Radeon +radiomics +radxtools +ramework +randomaffine +randomanisotropy +randombiasfield +randomblur +randomelasticdeformation +randomflip +randommotion +randomnoise +randomswap +rdp +reco +recoverconfig +reducelronplateau +reduceonplateau +reencoded +refering +Rekor +relativized +relu +rensen +Reparameterization +reparameterize +rescaler +residualunet +resnet +resunet +rgbatorgb +rgbtorgba +rigourous +Ritesh +rmsprop +rocm +rocmdocs +Ronneberger +rowvar +ruifrok +runnning +runtest +Saltz +samplewise +Sarthak +sarthakpati +savefig +sbakas +sbia +scikit +scipy +screenshots +scse +sdata +sdnet +seaborn +Seac +sebastianffx +securefederatedai +segmap +segmask +segmentor +Sens +sessionstart +setbiasranges +setcutoffrange +setsigmaranges +Sezgin +sge +Shahira +shubham +siddhesh +sigstore +silu +Simonyan +simpleitk +sitk +skimage +sklearn +slurm +smi +socio +Soham +Sotirios +sparseadam +spellchecker +Sprop +Spyridon +ssim +stackexchange +stainextract 
+stainlib +steplr +stepsize +subjectid +sume +superfences +sustainability +swapaxes +Tahsin +tcia +tempconvs +tensorboard +tgz +thresholded +thresholding +Thu +tiatoolbox +tiffslide +timepoints +timm +tio +tioq +tiosd +TLDR +tmi +TOOLSDIRECTORY +torchaudio +torchinfo +torchio +torchmetrics +torchvision +towardsdatascience +TPAMI +tqdm +traininginference +transunet +triaged +tryfirst +tsaftaris +TUDA +tversky +uanced +uinc +Ujjwal +Umeton +unet +unetr +uniformsample +uniformsampler +unittests +unitwise +unsqueeze +upenn +Uploaing +Uploded +upsample +upsampled +upsampling +utm +uzh +vahadane +validing +valuetopredict +vgg +Vinayak +vios +visualstudiomagazine +vmem +voxel +VRAM +vtk +vvv +WACV +warmupcosineschedule +Wauplin +wcs +weightedsample +weightedsampler +whl +WORKDIR +wsi +wsl +xavier +xdim +XDl +XEI +xkq +xlabel +xlim +xnat +XResolution +XTools +yamlchecker +yamlvalidator +ydim +ylabel +YResolution +Yrv +Yuemeng +zarr +Zeroplanes +zicat +znorm +ZNormalization +Zou +abebbed +adipocytes +afcc +AMNIST +autorefs +avq +baf +bdf +bfa +bjoh +cadf +cbcb +cdbe +cividis +cmap +CMNIST +colorectal +colormaps +cxbdfkig +dbdf +Deconvolutional +Dermatoscope +dff +eaik +edc +eeaf +eee +enu +faa +fdb +fdc +hdwlu +hjzwmjxvamrxotxu +ifqqtrs +ihd +Ingnore +ipykernel +ipynb +ipynbcheckpoints +jjzoifpdly +Kather +kernelspec +Krisam +lexer +lkrvyj +luap +mkdocstrings +MNIST +mpimg +mtdpzx +nbconvert +nbformat +nhfspc +ocflopa +OCTMNIST +pmkdaguy +pvqg +qfzk +qwuvqx +redownload +rsmff +rxtzgrcaq +SMNIST +swp +torchtext +uzbklab +uzsc +viridis +xaburhd +xso +ystore +Zisserman +zsuokb +zwezggl +zzokqk +thirdparty +adopy +Shohei +crcrpar +lrs +autograd +cudagraph +kwonly +torchscript \ No newline at end of file diff --git a/GANDLF/cli/huggingface_hub_handler.py b/GANDLF/cli/huggingface_hub_handler.py index 72e2f35b0..a582e9bf7 100644 --- a/GANDLF/cli/huggingface_hub_handler.py +++ b/GANDLF/cli/huggingface_hub_handler.py @@ -121,7 +121,7 @@ def push_to_model_hub( 
ignore_patterns=ignore_patterns, delete_patterns=delete_patterns, ) - print("Model Sucessfully Uploded") + print("Model Successfully Uploaded") def download_from_hub( diff --git a/GANDLF/compute/forward_pass.py b/GANDLF/compute/forward_pass.py index 69efa15a9..2135ca0ee 100644 --- a/GANDLF/compute/forward_pass.py +++ b/GANDLF/compute/forward_pass.py @@ -89,7 +89,7 @@ def validate_network( # # putting stuff in individual arrays for correlation analysis # all_targets = [] - # all_predics = [] + # all_predicts = [] if params["medcam_enabled"] and params["model"]["type"] == "torch": model.enable_medcam() params["medcam_enabled"] = True diff --git a/GANDLF/compute/step.py b/GANDLF/compute/step.py index 148d206cf..1d9db8c12 100644 --- a/GANDLF/compute/step.py +++ b/GANDLF/compute/step.py @@ -98,7 +98,7 @@ def step( f"Model output is not a Tensor: {type(output)}. Say, `deep_resunet` and `deep_unet` may return " f"list of tensors on different scales instead of just one prediction Tensor. However due to " f"GaNDLF architecture it is expected that models return only one tensor. For deep_* models " - f"only the biggeest scale is processed. Use these models with caution till fix is implemented." + f"only the biggest scale is processed. Use these models with caution till fix is implemented." 
) output = output[0] diff --git a/GANDLF/data/patch_miner/opm/utils.py b/GANDLF/data/patch_miner/opm/utils.py index 997f13f17..a0ebaf56f 100644 --- a/GANDLF/data/patch_miner/opm/utils.py +++ b/GANDLF/data/patch_miner/opm/utils.py @@ -431,7 +431,7 @@ def generate_initial_mask(slide_path: str, scale: int) -> Tuple[np.ndarray, tupl slide = openslide.open_slide(slide_path) slide_dims = slide.dimensions - # Call thumbnail for effiency, calculate scale relative to whole slide + # Call thumbnail for efficiency, calculate scale relative to whole slide slide_thumbnail = np.asarray( slide.get_thumbnail((slide_dims[0] // scale, slide_dims[1] // scale)) ) diff --git a/GANDLF/losses/loss_interface.py b/GANDLF/losses/loss_interface.py index e8459f41d..90d29154a 100644 --- a/GANDLF/losses/loss_interface.py +++ b/GANDLF/losses/loss_interface.py @@ -26,7 +26,7 @@ def forward(self, prediction: torch.Tensor, target: torch.Tensor) -> torch.Tenso class AbstractSegmentationLoss(AbstractLossFunction): """ - Base class for loss funcions that are used for segmentation tasks. + Base class for loss functions that are used for segmentation tasks. """ def __init__(self, params: dict): @@ -43,7 +43,7 @@ def _compute_single_class_loss( def _optional_loss_operations(self, loss: torch.Tensor) -> torch.Tensor: """ - Perform addtional operations on the loss value. Defaults to identity operation. + Perform additional operations on the loss value. Defaults to identity operation. If needed, child classes can override this method. Useful in cases where for example, the loss value needs to log-transformed or clipped. """ diff --git a/GANDLF/metrics/segmentation.py b/GANDLF/metrics/segmentation.py index 82254079f..cd1a7637b 100644 --- a/GANDLF/metrics/segmentation.py +++ b/GANDLF/metrics/segmentation.py @@ -226,7 +226,7 @@ def _calculator_sensitivity_specificity( float, float: The sensitivity and specificity between the object(s) in ```inp``` and the object(s) in ```target```. 
""" # inMask is mask of input array equal to a certain tissue (ie. all one's in tumor core) - # Ref mask is mask of certain tissue in ground truth (ie. all one's in refernce core ) + # Ref mask is mask of certain tissue in ground truth (ie. all one's in reference core ) # overlap is mask where the two equal each other # They are of the total number of voxels of the ground truth brain mask diff --git a/GANDLF/optimizers/README.md b/GANDLF/optimizers/README.md index 8d1499fb3..fe2e8d917 100644 --- a/GANDLF/optimizers/README.md +++ b/GANDLF/optimizers/README.md @@ -8,7 +8,7 @@ - Add the relevant code under the `GANDLF.optimizers.thirdparty` submodule. - Add a wrapper which takes in GaNDLF's `parameter` dictionary as input and creates a `torch.optim.Optimizer` object as output. - Add the wrapper to the `GANDLF.optimizers.thirdparty.__init__.py` so that it can be called from `GANDLF.optimizers.__init__.py`. - - See `GANDLF.optimizers.thirdparty.adopy.py` as an example. + - See `GANDLF.optimizers.thirdparty.adopt.py` as an example. - If a new dependency needs to be used, update GaNDLF's [`setup.py`](https://github.com/mlcommons/GaNDLF/blob/master/setup.py) with the new requirement. - Define a new submodule under `GANDLF.optimizers` as `GANDLF.optimizers.wrap_${package_name}.py`. - Ensure that the new algorithm is wrapped in a function which returns an object with the PyTorch optimizer type. Use any of the optimizers in `GANDLF.optimizers.wrap_torch.py` as an example. 
diff --git a/docs/extending.md b/docs/extending.md index 929efa515..897bf1e9b 100644 --- a/docs/extending.md +++ b/docs/extending.md @@ -80,7 +80,7 @@ To update/change/add a dependency in [setup](https://github.com/mlcommons/GaNDLF ## Adding new CLI command Example: `gandlf config-generator` [CLI command](https://github.com/mlcommons/GaNDLF/blob/master/GANDLF/entrypoints/config_generator.py) - Implement function and wrap it with `@click.command()` + `@click.option()` -- Add it to `cli_subommands` [dict](https://github.com/mlcommons/GaNDLF/blob/master/GANDLF/entrypoints/subcommands.py) +- Add it to `cli_subcommands` [dict](https://github.com/mlcommons/GaNDLF/blob/master/GANDLF/entrypoints/subcommands.py) The command would be available under `gandlf your-subcommand-name` CLI command. ## Update parameters diff --git a/docs/faq.md b/docs/faq.md index 0bb98239d..62f8ae109 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -53,9 +53,9 @@ Please see https://mlcommons.github.io/GaNDLF/usage/#federating-your-model-evalu Please read the [migration guide](https://mlcommons.github.io/GaNDLF/migration_guide) to understand the changes that have been made to GaNDLF. If you have any questions, please feel free to [post a support request](https://github.com/mlcommons/GaNDLF/issues/new?assignees=&labels=&template=--questions-help-support.md&title=). -### I am getting an error realted to version mismatch (greater or smaller) between the configuration and GaNDLF version. What should I do? +### I am getting an error related to version mismatch (greater or smaller) between the configuration and GaNDLF version. What should I do? -This is a safety feature to ensure a tight integartion between the configuration used to define a model and the code version used to perform the training. Ensure that you have all requirements satisfied, and then check the ``version`` key in the configration, and ensure it appropriately matches the output of ``gandlf run --version``. 
+This is a safety feature to ensure a tight integration between the configuration used to define a model and the code version used to perform the training. Ensure that you have all requirements satisfied, and then check the ``version`` key in the configuration, and ensure it appropriately matches the output of ``gandlf run --version``. ### What if I have another question? diff --git a/docs/usage.md b/docs/usage.md index a3d7f0278..386adbde3 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -611,7 +611,7 @@ To upload to a dataset or a Space, use the --repo-type option: ### Huggingface Template For Upload #### Design and Modify Template -To design the huggingface template use the hugging_face.md file change the medatory field +To design the huggingface template use the hugging_face.md file change the mandatory field [REQUIRED_FOR_GANDLF] to it's respective name don't leave it blank other wise it may through error, other field can be modeified by the user as per his convenience ```bash diff --git a/testing/test_full.py b/testing/test_full.py index 50b628e76..eccf0b3c8 100644 --- a/testing/test_full.py +++ b/testing/test_full.py @@ -2110,7 +2110,7 @@ def test_generic_one_hot_logic(): def test_generic_anonymizer(): - print("33: Starting anomymizer tests") + print("33: Starting anonymizer tests") input_file = get_testdata_file("MR_small.dcm") output_file = os.path.join(outputDir, "MR_small_anonymized.dcm") @@ -3292,14 +3292,14 @@ def test_generic_logging(capsys): os.remove(log_file) - # test the stout info level. The stout must show only INFO messages - message = "Testing stout logging" + # test the stdout info level. 
The stdout must show only INFO messages + message = "Testing stdout logging" logging.info(message) capture = capsys.readouterr() assert message in capture.out - # Test the stout not showing other messages - message = "Testing stout logging" + # Test the stdout not showing other messages + message = "Testing stdout logging" logging.debug(message) logging.warning(message) logging.error(message) @@ -3307,14 +3307,14 @@ def test_generic_logging(capsys): capture = capsys.readouterr() assert message not in capture.out - # test sterr must NOT show these messages. - message = "Testing sterr logging" + # test stderr must NOT show these messages. + message = "Testing stderr logging" logging.info(message) logging.debug(message) capture = capsys.readouterr() assert message not in capture.err - # test sterr must show these messages. + # test stderr must show these messages. logging.error(message) logging.warning(message) logging.critical(message)