OXIESEC PANEL
- Current Dir:
/
/
snap
/
core24
/
988
/
usr
/
lib
/
python3
/
dist-packages
/
chardet
Server IP: 139.59.38.164
Upload:
Create Dir:
Name
Size
Modified
Perms
📁
..
-
05/04/2025 04:37:50 PM
rwxr-xr-x
📄
__init__.py
4.68 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
__main__.py
123 bytes
08/20/2023 12:12:03 PM
rw-r--r--
📁
__pycache__
-
05/04/2025 04:37:50 PM
rwxr-xr-x
📄
big5freq.py
30.54 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
big5prober.py
1.72 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
chardistribution.py
9.8 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
charsetgroupprober.py
3.82 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
charsetprober.py
5.29 KB
08/20/2023 12:12:03 PM
rw-r--r--
📁
cli
-
05/04/2025 04:37:50 PM
rwxr-xr-x
📄
codingstatemachine.py
3.64 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
codingstatemachinedict.py
542 bytes
08/20/2023 12:12:03 PM
rw-r--r--
📄
cp949prober.py
1.82 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
enums.py
1.64 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
escprober.py
3.91 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
escsm.py
11.89 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
eucjpprober.py
3.84 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
euckrfreq.py
13.25 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
euckrprober.py
1.71 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
euctwfreq.py
36.05 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
euctwprober.py
1.71 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
gb2312freq.py
20.25 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
gb2312prober.py
1.72 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
hebrewprober.py
14.2 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
jisfreq.py
25.19 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
johabfreq.py
41.5 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
johabprober.py
1.71 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
jpcntx.py
26.42 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
langbulgarianmodel.py
102.1 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
langgreekmodel.py
96.16 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
langhebrewmodel.py
95.88 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
langhungarianmodel.py
98.98 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
langrussianmodel.py
125.02 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
langthaimodel.py
100.35 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
langturkishmodel.py
93.13 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
latin1prober.py
5.25 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
macromanprober.py
5.93 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
mbcharsetprober.py
3.63 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
mbcsgroupprober.py
2.08 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
mbcssm.py
29.68 KB
08/20/2023 12:12:03 PM
rw-r--r--
📁
metadata
-
05/04/2025 04:37:50 PM
rwxr-xr-x
📄
py.typed
0 bytes
08/20/2023 12:12:03 PM
rw-r--r--
📄
resultdict.py
402 bytes
08/20/2023 12:12:03 PM
rw-r--r--
📄
sbcharsetprober.py
6.25 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
sbcsgroupprober.py
4.04 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
sjisprober.py
3.91 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
universaldetector.py
14.5 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
utf1632prober.py
8.31 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
utf8prober.py
2.75 KB
08/20/2023 12:12:03 PM
rw-r--r--
📄
version.py
244 bytes
08/20/2023 12:12:03 PM
rw-r--r--
Editing: charsetprober.py
Close
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

import logging
import re
from typing import Optional, Union

from .enums import LanguageFilter, ProbingState

# Matches a "word": optional ASCII letters around at least one high byte
# (0x80-0xFF), optionally followed by one trailing marker byte.
INTERNATIONAL_WORDS_PATTERN = re.compile(
    b"[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?"
)


class CharSetProber:
    """Abstract base for all charset probers.

    Concrete probers override ``feed`` to consume byte chunks and report a
    ``ProbingState``; the static filter helpers pre-process input buffers
    before analysis.
    """

    # Confidence above which a prober may declare an early positive result.
    SHORTCUT_THRESHOLD = 0.95

    def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
        self._state = ProbingState.DETECTING
        self.active = True
        self.lang_filter = lang_filter
        self.logger = logging.getLogger(__name__)

    def reset(self) -> None:
        """Return the prober to its initial detecting state."""
        self._state = ProbingState.DETECTING

    @property
    def charset_name(self) -> Optional[str]:
        # Base class has no concrete charset; subclasses override.
        return None

    @property
    def language(self) -> Optional[str]:
        raise NotImplementedError

    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
        raise NotImplementedError

    @property
    def state(self) -> ProbingState:
        return self._state

    def get_confidence(self) -> float:
        # Base class is never confident; subclasses compute a real score.
        return 0.0

    @staticmethod
    def filter_high_byte_only(buf: Union[bytes, bytearray]) -> bytes:
        """Collapse every run of ASCII bytes (0x00-0x7F) into one space,
        leaving only the high-byte (>= 0x80) content for analysis."""
        return re.sub(b"([\x00-\x7F])+", b" ", buf)

    @staticmethod
    def filter_international_words(buf: Union[bytes, bytearray]) -> bytearray:
        """
        We define three types of bytes:
        alphabet: english alphabets [a-zA-Z]
        international: international characters [\x80-\xFF]
        marker: everything else [^a-zA-Z\x80-\xFF]
        The input buffer can be thought to contain a series of words delimited
        by markers. This function works to filter all words that contain at
        least one international character. All contiguous sequences of markers
        are replaced by a single space ascii character.
        This filter applies to all scripts which do not use English characters.
        """
        kept = bytearray()

        # Every match holds at least one international byte and may carry a
        # single trailing marker byte.
        for word in INTERNATIONAL_WORDS_PATTERN.findall(buf):
            kept.extend(word[:-1])

            # Normalise a trailing marker to a plain space: markers behave
            # alike across languages, so their exact identity carries no
            # signal for frequency analysis.
            tail = word[-1:]
            if not tail.isalpha() and tail < b"\x80":
                tail = b" "
            kept.extend(tail)

        return kept

    @staticmethod
    def remove_xml_tags(buf: Union[bytes, bytearray]) -> bytes:
        """
        Returns a copy of ``buf`` that retains only the sequences of English
        alphabet and high byte characters that are not between <> characters.
        This filter can be applied to all scripts which contain both English
        characters and extended ASCII characters, but is currently only used by
        ``Latin1Prober``.
        """
        out = bytearray()
        inside_tag = False
        keep_from = 0

        # A "c"-format memoryview yields one-byte ``bytes`` objects, letting
        # us compare against b"<" / b">" without copying the buffer.
        view = memoryview(buf).cast("c")
        for pos, ch in enumerate(view):
            # https://github.com/python/typeshed/issues/8182
            if ch == b">":  # type: ignore[comparison-overlap]
                # Tag closed: content resumes after this byte.
                keep_from = pos + 1
                inside_tag = False
            # https://github.com/python/typeshed/issues/8182
            elif ch == b"<":  # type: ignore[comparison-overlap]
                if pos > keep_from and not inside_tag:
                    # Flush the stretch of text seen since the last tag,
                    # then a space to delimit it from what follows.
                    out.extend(view[keep_from:pos])
                    out.extend(b" ")
                inside_tag = True

        # Flush the trailing stretch unless the buffer ended mid-tag.
        if not inside_tag:
            out.extend(view[keep_from:])

        return out