Tox and github actions. (#24)

Julien Palard 2020-11-23 14:26:34 +01:00 committed by GitHub
parent f7b61e04d0
commit 3e4bb50687
9 changed files with 222 additions and 61 deletions

.github/workflows/tests.yml (new file)

@@ -0,0 +1,42 @@
name: Tests

on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master

jobs:
  test:
    name: Run tox
    runs-on: ubuntu-latest
    strategy:
      matrix:
        tox:
          - py_version: '3.6'
            env: py36
          - py_version: '3.7'
            env: py37
          - py_version: '3.8'
            env: py38,flake8,mypy,black,pylint,pydocstyle,coverage
          - py_version: '3.9'
            env: py39
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.tox.py_version }}
      - uses: actions/cache@v2
        with:
          path: .tox
          key: ${{ matrix.tox.py_version }}-${{ hashFiles('tox.ini') }}-${{ hashFiles('requirements-dev.txt') }}
      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y hunspell
      - name: Install tox
        run: python3 -m pip install tox
      - name: Run tox
        run: tox -q -p all -e ${{ matrix.tox.env }}

pospell.py

@@ -1,11 +1,11 @@
-"""pospell is a spellcheckers for po files containing reStructuedText.
-"""
+"""pospell is a spellcheckers for po files containing reStructuedText."""
 import io
 from string import digits
 from unicodedata import category
 import logging
 import subprocess
 import sys
+from typing import Dict
 from contextlib import redirect_stderr
 from itertools import chain
 from pathlib import Path
@@ -26,7 +26,11 @@ DEFAULT_DROP_CAPITALIZED = {"fr": True, "fr_FR": True}
 class POSpellException(Exception):
-    pass
+    """All exceptions from this module inherit from this one."""
+
+
+class Unreachable(POSpellException):
+    """The code encontered a state that should be unreachable."""
 
 
 try:
@@ -39,10 +43,15 @@ except FileNotFoundError:
 class DummyNodeClass(docutils.nodes.Inline, docutils.nodes.TextElement):
-    pass
+    """Used to represent any unknown roles, so we can parse any rst blindly."""
 
 
 def monkey_patch_role(role):
+    """Patch docutils.parsers.rst.roles.role so it always match.
+
+    Giving a DummyNodeClass for unknown roles.
+    """
     def role_or_generic(role_name, language_module, lineno, reporter):
         base_role, message = role(role_name, language_module, lineno, reporter)
         if base_role is None:
@@ -57,56 +66,65 @@ roles.role = monkey_patch_role(roles.role)
 class NodeToTextVisitor(docutils.nodes.NodeVisitor):
+    """Recursively convert a docutils node to a Python string.
+
+    Usage:
+
+        >>> visitor = NodeToTextVisitor(document)
+        >>> document.walk(visitor)
+        >>> print(str(visitor))
+
+    It ignores (see IGNORE_LIST) some nodes, which we don't want in
+    hunspell (enphasis typically contain proper names that are unknown
+    to dictionaires).
+    """
+
+    IGNORE_LIST = (
+        "emphasis",
+        "superscript",
+        "title_reference",
+        "strong",
+        "DummyNodeClass",
+        "reference",
+        "literal",
+        "Text",
+    )
+
     def __init__(self, document):
+        """Initialize visitor for the given node/document."""
         self.output = []
-        self.depth = 0
         super().__init__(document)
 
-    def dispatch_visit(self, node):
-        self.depth += 1
-        super().dispatch_visit(node)
-
-    def dispatch_departure(self, node):
-        self.depth -= 1
-        super().dispatch_departure(node)
-
     def unknown_visit(self, node):
         """Mandatory implementation to visit unknwon nodes."""
-        # print(" " * self.depth * 4, node.__class__.__name__, ":", node)
 
-    def unknown_departure(self, node):
-        """To help debugging tree."""
-        # print(node, repr(node), node.__class__.__name__)
-
-    def visit_emphasis(self, node):
-        raise docutils.nodes.SkipChildren
-
-    def visit_superscript(self, node):
-        raise docutils.nodes.SkipChildren
-
-    def visit_title_reference(self, node):
-        raise docutils.nodes.SkipChildren
-
-    def visit_strong(self, node):
-        raise docutils.nodes.SkipChildren
-
-    def visit_DummyNodeClass(self, node):
-        raise docutils.nodes.SkipChildren
-
-    def visit_reference(self, node):
-        raise docutils.nodes.SkipChildren
-
-    def visit_literal(self, node):
-        raise docutils.nodes.SkipChildren
+    @staticmethod
+    def ignore(node):
+        """Just raise SkipChildren.
+
+        Used for all visit_* in the IGNORE_LIST.
+        See __getattr__.
+        """
+        raise docutils.nodes.SkipChildren
+
+    def __getattr__(self, name):
+        """Skip childrens from the IGNORE_LIST."""
+        if name.startswith("visit_") and name[6:] in self.IGNORE_LIST:
+            return self.ignore
+        raise AttributeError(name)
 
     def visit_Text(self, node):
+        """Keep this node text, this is typically what we want to spell check."""
         self.output.append(node.rawsource)
 
     def __str__(self):
+        """Give the accumulated strings."""
         return " ".join(self.output)
 
 
 def strip_rst(line):
+    """Transform reStructuredText to plain text."""
    if line.endswith("::"):
        # Drop :: at the end, it would cause Literal block expected
        line = line[:-2]
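
The hunk above replaces eight copy-pasted visit_* methods with a single __getattr__ fallback over IGNORE_LIST. A minimal, self-contained sketch of that pattern, with a stand-in SkipChildren exception so the snippet does not depend on docutils:

class SkipChildren(Exception):
    """Stand-in for docutils.nodes.SkipChildren in this sketch."""


class Visitor:
    IGNORE_LIST = ("emphasis", "strong", "literal")

    @staticmethod
    def ignore(node):
        """Handler returned for every visit_<name> whose name is in IGNORE_LIST."""
        raise SkipChildren

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails, so explicitly
        # defined visitors (like visit_Text in pospell) are unaffected.
        if name.startswith("visit_") and name[6:] in self.IGNORE_LIST:
            return self.ignore
        raise AttributeError(name)


visitor = Visitor()
try:
    visitor.visit_emphasis("some node")  # resolved through __getattr__
except SkipChildren:
    print("emphasis subtree skipped")
print(hasattr(visitor, "visit_paragraph"))  # False: neither defined nor ignored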
@@ -175,11 +193,13 @@ def clear(line, drop_capitalized=False, po_path=""):
 def quote_for_hunspell(text):
-    """
-    Quoting the manpage:
+    """Quote a paragraph so hunspell don't misinterpret it.
+
+    Quoting the manpage:
     It is recommended that programmatic interfaces prefix
     every data line with an uparrow to protect themselves
-    against future changes in hunspell."""
+    against future changes in hunspell.
+    """
     out = []
     for line in text.split("\n"):
         out.append("^" + line if line else "")
@@ -187,9 +207,10 @@ def quote_for_hunspell(text):
 def po_to_text(po_path, drop_capitalized=False):
-    """Converts a po file to a text file, by stripping the msgids and all
-    po syntax, but by keeping the kept lines at their same position /
-    line number.
+    """Convert a po file to a text file.
+
+    This strips the msgids and all po syntax while keeping lines at
+    their same position / line number.
     """
     buffer = []
     lines = 0
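
The line-number guarantee in that docstring is what lets the hunspell output further down be mapped back to a file and line. A rough sketch of the idea, not pospell's actual implementation: it assumes polib (which pospell builds on) and its entry.linenum attribute, and leaves out the cleanup the real function applies to each msgstr.

import polib


def text_aligned_with_po(po_path):
    """Emit each msgstr at the line it occupies in the .po file, padding with blanks."""
    buffer = []
    lines = 0
    for entry in polib.pofile(po_path):
        while lines < entry.linenum - 1:  # pad so positions keep matching
            buffer.append("")
            lines += 1
        buffer.append(entry.msgstr)
        lines += 1
    return "\n".join(buffer)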
@@ -232,12 +253,14 @@ def parse_args():
     parser.add_argument(
         "--drop-capitalized",
         action="store_true",
-        help="Always drop capitalized words in sentences (defaults according to the language).",
+        help="Always drop capitalized words in sentences"
+        " (defaults according to the language).",
     )
     parser.add_argument(
         "--no-drop-capitalized",
         action="store_true",
-        help="Never drop capitalized words in sentences (defaults according to the language).",
+        help="Never drop capitalized words in sentences"
+        " (defaults according to the language).",
     )
     parser.add_argument(
         "po_file",
@@ -275,7 +298,9 @@ def parse_args():
 def look_like_a_word(word):
-    """Used to filter out non-words like `---` or `-0700` so they don't
+    """Return True if the given str looks like a word.
+
+    Used to filter out non-words like `---` or `-0700` so they don't
     get reported. They typically are not errors.
     """
     if not word:
@@ -296,13 +321,13 @@ def spell_check(
     drop_capitalized=False,
     debug_only=False,
 ):
-    """Check for spelling mistakes in the files po_files (po format,
-    containing restructuredtext), for the given language.
+    """Check for spelling mistakes in the given po_files.
+
+    (po format, containing restructuredtext), for the given language.
 
     personal_dict allow to pass a personal dict (-p) option, to hunspell.
     Debug only will show what's passed to Hunspell instead of passing it.
     """
-    errors = []
     personal_dict_arg = ["-p", personal_dict] if personal_dict else []
     texts_for_hunspell = {}
     for po_file in po_files:
@@ -310,32 +335,48 @@ def spell_check(
             print(po_to_text(str(po_file), drop_capitalized))
             continue
         texts_for_hunspell[po_file] = po_to_text(str(po_file), drop_capitalized)
+    if debug_only:
+        return 0
     try:
         output = subprocess.run(
             ["hunspell", "-d", language, "-a"] + personal_dict_arg,
             universal_newlines=True,
             input=quote_for_hunspell("\n".join(texts_for_hunspell.values())),
             stdout=subprocess.PIPE,
+            check=True,
         )
     except subprocess.CalledProcessError:
         return -1
+    return parse_hunspell_output(texts_for_hunspell, output)
+
+
+def parse_hunspell_output(hunspell_input: Dict[str, str], hunspell_output) -> int:
+    """Parse `hunspell -a` output.
+
+    Print one line per error on stderr, of the following format:
+    FILE:LINE:ERROR
+
+    Returns the number of errors.
+
+    hunspell_input contains a dict of files: all_lines_for_this_file.
+    """
     errors = 0
-    checked_files = iter(texts_for_hunspell.items())
+    checked_files = iter(hunspell_input.items())
     checked_file_name, checked_text = next(checked_files)
     checked_lines = iter(checked_text.split("\n"))
-    currently_checked_line = next(checked_lines)
+    next(checked_lines)
     current_line_number = 1
-    for line in output.stdout.split("\n")[1:]:
+    for line in hunspell_output.stdout.split("\n")[1:]:
         if not line:
             try:
-                currently_checked_line = next(checked_lines)
+                next(checked_lines)
                 current_line_number += 1
             except StopIteration:
                 try:
                     checked_file_name, checked_text = next(checked_files)
                     checked_lines = iter(checked_text.split("\n"))
-                    currently_checked_line = next(checked_lines)
+                    next(checked_lines)
                     current_line_number = 1
                 except StopIteration:
                     return errors
@@ -343,10 +384,11 @@ def spell_check(
         if line == "*":  # OK
             continue
         if line[0] == "&":
-            _, original, count, offset, *miss = line.split()
+            _, original, *_ = line.split()
             if look_like_a_word(original):
                 print(checked_file_name, current_line_number, original, sep=":")
                 errors += 1
+    raise Unreachable("Got this one? I'm sorry, read XKCD 2200, then open an issue.")
 
 
 def gracefull_handling_of_missing_dicts(language):
@@ -384,7 +426,7 @@ https://github.com/JulienPalard/pospell/) so I can enhance this error message.
 def main():
-    """Module entry point."""
+    """Entry point (for command-line)."""
     args = parse_args()
     logging.basicConfig(level=50 - 10 * args.verbose)
     default_drop_capitalized = DEFAULT_DROP_CAPITALIZED.get(args.language, False)
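
For context on what parse_hunspell_output consumes: "hunspell -a" prints a version banner first, then answers each input word with "*" for a correct word or "& <word> <count> <offset>: <suggestions>" for a miss, and ends each input line with an empty line. A standalone sketch of driving hunspell the same way as the code above; it assumes hunspell and a French dictionary are installed locally.

import subprocess

text = "Bonjur tout le monde"  # one line to check; "Bonjur" is misspelled
result = subprocess.run(
    ["hunspell", "-d", "fr", "-a"],
    input="^" + text,  # same ^-quoting as quote_for_hunspell
    universal_newlines=True,
    stdout=subprocess.PIPE,
    check=True,
)
for line in result.stdout.split("\n")[1:]:  # [1:] skips the version banner
    if not line or line == "*":
        continue  # empty line ends an input line, "*" means the word is fine
    if line[0] == "&":
        _, original, *_ = line.split()  # "& original count offset: suggestions..."
        print("misspelled:", original)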

pyproject.toml (new file)

@@ -0,0 +1,3 @@
[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"

requirements-dev.in

@@ -1,6 +1,8 @@
 bandit
 black
+coverage
 flake8
 isort
 mypy
 pylint
+pytest

requirements-dev.txt

@@ -6,28 +6,36 @@
 #
 appdirs==1.4.4              # via black
 astroid==2.4.2              # via pylint
+attrs==20.3.0               # via pytest
 bandit==1.6.2               # via -r requirements-dev.in
 black==20.8b1               # via -r requirements-dev.in
 click==7.1.2                # via black
+coverage==5.3               # via -r requirements-dev.in
 flake8==3.8.4               # via -r requirements-dev.in
 gitdb==4.0.5                # via gitpython
 gitpython==3.1.11           # via bandit
+iniconfig==1.1.1            # via pytest
 isort==5.6.4                # via -r requirements-dev.in, pylint
 lazy-object-proxy==1.4.3    # via astroid
 mccabe==0.6.1               # via flake8, pylint
 mypy-extensions==0.4.3      # via black, mypy
 mypy==0.790                 # via -r requirements-dev.in
+packaging==20.4             # via pytest
 pathspec==0.8.1             # via black
 pbr==5.5.1                  # via stevedore
+pluggy==0.13.1              # via pytest
+py==1.9.0                   # via pytest
 pycodestyle==2.6.0          # via flake8
 pyflakes==2.2.0             # via flake8
 pylint==2.6.0               # via -r requirements-dev.in
+pyparsing==2.4.7            # via packaging
+pytest==6.1.2               # via -r requirements-dev.in
 pyyaml==5.3.1               # via bandit
 regex==2020.11.13           # via black
-six==1.15.0                 # via astroid, bandit
+six==1.15.0                 # via astroid, bandit, packaging
 smmap==3.0.4                # via gitdb
 stevedore==3.2.2            # via bandit
-toml==0.10.2                # via black, pylint
+toml==0.10.2                # via black, pylint, pytest
 typed-ast==1.4.1            # via black, mypy
 typing-extensions==3.7.4.3  # via black, mypy
 wrapt==1.12.1               # via astroid

setup.cfg

@@ -35,8 +35,6 @@ classifiers =
     License :: OSI Approved :: MIT License
     Natural Language :: English
     Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.5
-    Programming Language :: Python :: 3.6
 
 [options]
 py_modules = pospell

tests: sample .po file

@@ -1,2 +1,2 @@
-msgid "Rendez-vous à 10h chez Murex"
-msgstr "See your at 10h at Murex"
+msgid "Rendez-vous à 10h à la fête"
+msgstr "See your at 10h at the party"

tests: Python test module

@@ -1,5 +1,3 @@
-import os
-from types import SimpleNamespace
 from pathlib import Path
 
 import pytest

tox.ini (new file)

@@ -0,0 +1,68 @@
[flake8]
;E203 for black (whitespace before : in slices), and F811 for @overload
ignore = E203, F811
max-line-length = 88

[coverage:run]
; branch = true: would need a lot of pragma: no branch on infinite loops.
parallel = true
omit =
    .tox/*

[coverage:report]
skip_covered = True
show_missing = True
exclude_lines =
    pragma: no cover
    def __repr__
    if self\.debug
    raise AssertionError
    raise NotImplementedError
    if __name__ == .__main__.:

[tox]
envlist = py36, py37, py38, py39, flake8, mypy, black, pylint, pydocstyle, coverage
isolated_build = True
skip_missing_interpreters = True

[testenv]
deps = -r requirements-dev.txt
commands = coverage run -m pytest
setenv =
    COVERAGE_FILE={toxworkdir}/.coverage.{envname}

[testenv:coverage]
depends = py36, py37, py38, py39
parallel_show_output = True
deps = coverage
skip_install = True
setenv = COVERAGE_FILE={toxworkdir}/.coverage
commands =
    coverage combine
    coverage report --fail-under 65

[testenv:flake8]
deps = flake8
skip_install = True
commands = flake8 tests/ pospell.py

[testenv:black]
deps = black
skip_install = True
commands = black --check --diff tests/ pospell.py

[testenv:mypy]
deps = mypy
skip_install = True
commands = mypy --ignore-missing-imports pospell.py

[testenv:pylint]
deps = pylint
commands = pylint --disable import-outside-toplevel,invalid-name pospell.py

[testenv:pydocstyle]
deps = pydocstyle
skip_install = True
commands = pydocstyle pospell.py