cengal.web_tools.detect_browsers_language.by_http_headers.versions.v_0.by_http_headers
#!/usr/bin/env python
# coding=utf-8

# Copyright © 2012-2024 ButenkoMS. All rights reserved. Contacts: <gtalk@butenkoms.space>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


__all__ = ['parse_accept_language', 'normalize_lang_from_parts', 'normalize_lang', 'optimize_accept_language',
           'match_langs']


"""
Detect browser language preferences from the HTTP ``Accept-Language`` header.

Docstrings: http://www.python.org/dev/peps/pep-0257/
"""

__author__ = "ButenkoMS <gtalk@butenkoms.space>"
__copyright__ = "Copyright © 2012-2024 ButenkoMS. All rights reserved. Contacts: <gtalk@butenkoms.space>"
__credits__ = ["ButenkoMS <gtalk@butenkoms.space>", ]
__license__ = "Apache License, Version 2.0"
__version__ = "4.4.1"
__maintainer__ = "ButenkoMS <gtalk@butenkoms.space>"
__email__ = "gtalk@butenkoms.space"
# __status__ = "Prototype"
__status__ = "Development"
# __status__ = "Production"


from cengal.text_processing.text_processing import removeprefix
from cengal.math.numbers import RationalNumber
from collections import OrderedDict
from typing import Mapping, Optional, List, Tuple, OrderedDict, Dict, Set


def parse_accept_language(headers: Mapping) -> Optional[OrderedDict[str, RationalNumber]]:
    """Parse an ``Accept-Language`` header into an ordered ``{lang: weight}`` map.

    Args:
        headers: mapping of HTTP headers; looked up under the lower-case
            ``'accept-language'`` key.

    Returns:
        OrderedDict of language tag -> quality weight sorted by weight
        (highest first; the sort is stable, so header order breaks ties),
        or None when the header is absent.
    """
    accept_language_str = headers.get('Accept-Language'.casefold(), None)
    if accept_language_str is None:
        return None

    result_list = list()
    for lang in accept_language_str.split(','):
        lang = lang.strip()
        weight: RationalNumber = 1  # default quality when no ';q=' part is given
        if ';' in lang:
            # split(';', 1): a malformed entry with extra ';' parts must not
            # raise ValueError; an unparsable weight degrades to 0 instead.
            lang, weight_str = lang.split(';', 1)
            lang = lang.strip()
            weight_str = removeprefix(weight_str.strip(), 'q=').strip()
            try:
                weight = float(weight_str)
            except ValueError:
                weight = 0

        result_list.append((lang, weight))

    result_list.sort(key=lambda item: item[1], reverse=True)
    return OrderedDict(result_list)


def normalize_lang_from_parts(main_lang: str, sub_lang: Optional[str]) -> str:
    """Assemble a tag from its parts: ``('en', 'us') -> 'en-US'``.

    A falsy (None/empty) ``sub_lang`` yields the bare main language.
    """
    if sub_lang:
        return f'{main_lang}-{sub_lang.upper()}'
    else:
        return main_lang


def normalize_lang(lang: str) -> str:
    """Normalize a language tag: strip whitespace, upper-case the sub-tag.

    ``split('-', 1)`` tolerates tags with several subtags (e.g. ``zh-hans-cn``)
    instead of raising ValueError on unpacking.
    """
    if '-' in lang:
        main_lang, sub_lang = lang.split('-', 1)
        lang = f'{main_lang.strip()}-{sub_lang.strip().upper()}'
    else:
        lang = lang.strip()

    return lang


def optimize_accept_language(parsed_accept_language: Optional[OrderedDict[str, RationalNumber]]) -> Optional[OrderedDict[str, RationalNumber]]:
    """Casefold/normalize parsed languages and append bare main-language fallbacks.

    For every regional tag (``en-US``) a fallback entry for the bare main
    language (``en``) with the same weight is appended, unless the client
    already listed that main language explicitly.

    Returns None when given None (no header was present).
    """
    if parsed_accept_language is None:
        return None

    # Membership must be checked against casefolded keys: the entries below are
    # casefolded, while the input mapping may use arbitrary case.
    known_langs = {known.casefold() for known in parsed_accept_language}
    result = OrderedDict()
    for lang, weight in parsed_accept_language.items():
        lang = lang.casefold()
        if '-' in lang:
            main_lang, sub_lang = lang.split('-', 1)  # tolerate extra subtags
            main_lang = main_lang.strip()
            result[f'{main_lang}-{sub_lang.strip().upper()}'] = weight
            if main_lang not in known_langs:
                result[main_lang] = weight
        else:
            result[lang] = weight

    return result


def match_langs(default_lang: str,
                featured_langs: Set[str],
                supported_langs: Set[str],
                languages_mapping: Dict[str, str],
                parsed_accept_language: Optional[OrderedDict[str, RationalNumber]]
                ) -> Tuple[str, OrderedDict[str, RationalNumber], OrderedDict[str, RationalNumber]]:
    """Choose the best language for the client among the application's languages.

    Args:
        default_lang: returned when nothing in the header matches.
        featured_langs: preferred application languages.
        supported_langs: other languages the application can serve.
        languages_mapping: alias table applied to each client tag before matching.
        parsed_accept_language: output of parse_accept_language / optimize_accept_language.

    Returns:
        (best_lang, featured_matches, supported_matches); a featured match
        always wins over a merely supported one.
    """
    featured_result = OrderedDict()
    result = OrderedDict()
    if parsed_accept_language is not None:
        for lang, weight in parsed_accept_language.items():
            lang = languages_mapping.get(lang, lang)
            if lang in featured_langs:
                featured_result[lang] = weight
            elif lang in supported_langs:
                result[lang] = weight

    # The dicts preserve insertion (i.e. preference) order, so the first key
    # is the highest-priority match.
    if featured_result:
        better_lang = next(iter(featured_result))
    elif result:
        better_lang = next(iter(result))
    else:
        better_lang = default_lang

    return better_lang, featured_result, result
def parse_accept_language(headers: Mapping) -> Optional[OrderedDict[str, Union[int, float]]]:
    """Parse an ``Accept-Language`` header into an ordered ``{lang: weight}`` map.

    Args:
        headers: mapping of HTTP headers; looked up under the lower-case
            ``'accept-language'`` key.

    Returns:
        OrderedDict of language tag -> quality weight sorted by weight
        (highest first; the sort is stable, so header order breaks ties),
        or None when the header is absent.
    """
    accept_language_str = headers.get('Accept-Language'.casefold(), None)
    if accept_language_str is None:
        return None

    result_list = list()
    for lang in accept_language_str.split(','):
        lang = lang.strip()
        weight: Union[int, float] = 1  # default quality when no ';q=' is given
        if ';' in lang:
            # split(';', 1): a malformed entry with extra ';' parts must not
            # raise ValueError on unpacking.
            lang, weight_str = lang.split(';', 1)
            lang = lang.strip()
            weight_str = weight_str.strip()
            if weight_str.startswith('q='):  # inline prefix strip (pre-3.9 safe)
                weight_str = weight_str[2:]
            try:
                weight = float(weight_str.strip())
            except ValueError:
                weight = 0  # unparsable quality -> lowest priority

        result_list.append((lang, weight))

    result_list.sort(key=lambda item: item[1], reverse=True)
    return OrderedDict(result_list)
def normalize_lang_from_parts(main_lang: str, sub_lang: Optional[str]) -> str:
    """Assemble a language tag from its parts: ``('en', 'us') -> 'en-US'``.

    A falsy (None/empty) ``sub_lang`` yields the bare main language unchanged.
    """
    return f'{main_lang}-{sub_lang.upper()}' if sub_lang else main_lang
def normalize_lang(lang: str) -> str:
    """Normalize a language tag: strip whitespace, upper-case the sub-tag.

    ``split('-', 1)`` tolerates tags with several subtags (e.g. ``zh-hans-cn``)
    instead of raising ValueError on unpacking.
    """
    if '-' in lang:
        main_lang, sub_lang = lang.split('-', 1)
        return f'{main_lang.strip()}-{sub_lang.strip().upper()}'
    return lang.strip()
def optimize_accept_language(parsed_accept_language: Optional[OrderedDict[str, Union[int, float]]]) -> Optional[OrderedDict[str, Union[int, float]]]:
    """Casefold/normalize parsed languages and append bare main-language fallbacks.

    For every regional tag (``en-US``) a fallback entry for the bare main
    language (``en``) with the same weight is appended, unless the client
    already listed that main language explicitly.

    Returns None when given None (no header was present).
    """
    if parsed_accept_language is None:
        return None

    # Membership must be checked against casefolded keys: the entries below are
    # casefolded, while the input mapping may use arbitrary case.
    known_langs = {known.casefold() for known in parsed_accept_language}
    result = OrderedDict()
    for lang, weight in parsed_accept_language.items():
        lang = lang.casefold()
        if '-' in lang:
            # split('-', 1): tolerate tags with extra subtags (e.g. zh-hans-cn)
            main_lang, sub_lang = lang.split('-', 1)
            main_lang = main_lang.strip()
            result[f'{main_lang}-{sub_lang.strip().upper()}'] = weight
            if main_lang not in known_langs:
                result[main_lang] = weight
        else:
            result[lang] = weight

    return result
def match_langs(default_lang: str,
                featured_langs: Set[str],
                supported_langs: Set[str],
                languages_mapping: Dict[str, str],
                parsed_accept_language: Optional[OrderedDict[str, Union[int, float]]]
                ) -> Tuple[str, OrderedDict[str, Union[int, float]], OrderedDict[str, Union[int, float]]]:
    """Choose the best language for the client among the application's languages.

    Args:
        default_lang: returned when nothing in the header matches.
        featured_langs: preferred application languages.
        supported_langs: other languages the application can serve.
        languages_mapping: alias table applied to each client tag before matching.
        parsed_accept_language: output of parse_accept_language /
            optimize_accept_language, or None when no header was present.

    Returns:
        (best_lang, featured_matches, supported_matches); a featured match
        always wins over a merely supported one.
    """
    featured_result = OrderedDict()
    result = OrderedDict()
    if parsed_accept_language is not None:
        for lang, weight in parsed_accept_language.items():
            lang = languages_mapping.get(lang, lang)
            if lang in featured_langs:
                featured_result[lang] = weight
            elif lang in supported_langs:
                result[lang] = weight

    # The dicts preserve insertion (i.e. preference) order, so the first key
    # is the highest-priority match.
    if featured_result:
        better_lang = next(iter(featured_result))
    elif result:
        better_lang = next(iter(result))
    else:
        better_lang = default_lang

    return better_lang, featured_result, result