Skip to content

Commit

Permalink
publish
Browse files Browse the repository at this point in the history
  • Loading branch information
sudoskys committed May 14, 2024
1 parent 290c13c commit e13ab8c
Show file tree
Hide file tree
Showing 13 changed files with 34 additions and 12 deletions.
2 changes: 1 addition & 1 deletion playground/usage.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import io

from cleanse_speech import DLFA
from cleanse_speech.dlfa import DLFA

if __name__ == '__main__':
dfa = DLFA(words_resource=[
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "cleanse-speech"
version = "0.1.0"
version = "0.1.1"
description = "An intelligent Python library designed to detect and filter offensive language, ensuring respectful and constructive communication in any digital space."
authors = [
{ name = "llmkira", email = "me@dianas.cyou" },
Expand Down
7 changes: 0 additions & 7 deletions src/cleanse-speech/__init__.py

This file was deleted.

7 changes: 7 additions & 0 deletions src/cleanse_speech/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
"""Package entry point: re-exports the public API of cleanse_speech."""
from .bookshelf import SpamShelf
from .dlfa import DLFA

# Explicit public API — names exported by `from cleanse_speech import *`
# and the documented import surface for downstream users.
__all__ = [
    'DLFA',
    'SpamShelf',
]
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
3 changes: 0 additions & 3 deletions tests/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +0,0 @@



25 changes: 25 additions & 0 deletions tests/test_dfa.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import io

import pytest

from cleanse_speech import DLFA


def test_censor():
    """Verify that matched words are replaced by asterisks of equal length.

    NOTE(review): this was previously decorated with ``@pytest.fixture``,
    which makes pytest register it as a fixture instead of collecting it
    as a test — its assertions never ran. The decorator is removed so the
    test actually executes.
    """
    # Word resources may be plain lists or byte streams; both forms are
    # exercised here (see DLFA(words_resource=...) usage in playground/usage.py).
    dfa = DLFA(words_resource=[
        ['你好'],
        io.BytesIO(b'sensitive'),
    ])
    # '你好' (2 chars) -> '**'; 'sensitive' (9 chars) -> '*********'.
    # The second argument (10) is passed through unchanged from the original test.
    assert dfa.censor_text('This is a 你好 word.', 10) == 'This is a ** word.'
    assert dfa.censor_text('This is a sensitive word.', 10) == 'This is a ********* word.'


def test_contains_illegal():
    """Detection should fire on any configured word and stay quiet otherwise."""
    # Both supported resource forms: an in-memory word list and a byte stream.
    word_sources = [
        ['你好'],
        io.BytesIO(b'sensitive'),
    ]
    detector = DLFA(words_resource=word_sources)

    flagged_cn = detector.contains_illegal('This is a 你好 word.')
    flagged_en = detector.contains_illegal('This is a sensitive word.')
    clean = detector.contains_illegal('This is a normal word.')

    assert flagged_cn
    assert flagged_en
    assert not clean

0 comments on commit e13ab8c

Please sign in to comment.