Fix bug 585: updates from Semenichenko Anna based on teacher project

Bug585-semenichenko-clean
vankq 2025-06-12 13:51:39 +08:00
parent d9512c929b
commit aa5ff0d0c1
9 changed files with 1584 additions and 0 deletions

166
Article.py Normal file
View File

@ -0,0 +1,166 @@
from WordFreq import WordFreq
from wordfreqCMD import youdao_link, sort_in_descending_order
import pickle_idea, pickle_idea2
import os
import random, glob
import hashlib
from datetime import datetime
from flask import Flask, request, redirect, render_template, url_for, session, abort, flash, get_flashed_messages
from difficulty import get_difficulty_level_for_user, text_difficulty_level, user_difficulty_level
from model.article import get_all_articles, get_article_by_id, get_number_of_articles
import logging
import re
# Filesystem locations used throughout this module.
path_prefix = './'
db_path_prefix = './db/' # comment this line in deployment
# NOTE(review): hard-coded absolute path to one developer's Windows machine —
# this will break anywhere else; should be derived from db_path_prefix. Confirm
# before deployment.
oxford_words_path='C:\\Users\\ANNA\\Desktop\\ooad\\app\\db\\oxford_words.txt'
def count_oxford_words(text, oxford_words):
    """Count how many tokens of *text* appear in the Oxford word dict.

    :param text: raw article text
    :param oxford_words: mapping whose keys are known Oxford words
    :return: tuple (oxford_word_count, total_words)
    """
    tokens = re.findall(r'\b\w+\b', text.lower())
    hits = sum(1 for token in tokens if token in oxford_words)
    return hits, len(tokens)
def calculate_ratio(oxford_word_count, total_words):
    """Return the fraction of Oxford words in a text; 0 for an empty text."""
    return oxford_word_count / total_words if total_words != 0 else 0
def load_oxford_words(file_path):
    """Parse the Oxford word list into {word: {'pos': ..., 'level': ...}}.

    Each data line is expected to hold at least three whitespace-separated
    fields: word, part of speech, CEFR level.

    Bug fix: blank or malformed lines previously raised IndexError when
    indexing parts[1]/parts[2]; they are now skipped, matching the tolerant
    parsing used by the loader in difficulty.py (len(parts) >= 3 guard).

    :param file_path: path to the oxford_words.txt file
    :return: dict mapping word -> {'pos': str, 'level': str}
    """
    oxford_words = {}
    with open(file_path, 'r', encoding='utf-8') as file:
        for line in file:
            parts = line.strip().split()
            if len(parts) < 3:  # skip blank/malformed lines instead of crashing
                continue
            word, pos, level = parts[0], parts[1], parts[2]
            oxford_words[word] = {'pos': pos, 'level': level}
    return oxford_words
def total_number_of_essays():
    """Return the number of articles in the database (thin wrapper around
    model.article.get_number_of_articles)."""
    return get_number_of_articles()
def get_article_title(s):
    """Return the first line of *s*; articles keep their title on line one."""
    title, _, _ = s.partition('\n')
    return title
def get_article_body(s):
    """Return everything after the first line of *s* (the article text)."""
    _, _, body = s.partition('\n')
    return body
def get_today_article(user_word_list, visited_articles):
    """Pick an article whose difficulty matches the user's vocabulary level.

    :param user_word_list: path to the user's word-frequency pickle file
    :param visited_articles: dict with keys "index" (cursor into the reading
        history) and "article_ids" (ids of articles shown so far, oldest
        first), or None on the user's first visit
    :return: tuple (visited_articles, today_article, result_of_generate_article)
        where today_article is a dict of display fields (or None) and
        result_of_generate_article is one of "found", "not found",
        "had read all articles"
    """
    if visited_articles is None:
        visited_articles = {
            "index" : 0, # cursor into article_ids
            "article_ids": [] # ids of previously shown articles, newest last
        }
    if visited_articles["index"] > len(visited_articles["article_ids"])-1: # cursor is past the history: pick a brand-new article, so fetch all of them
        result = get_all_articles()
    else: # re-show a previously read article, so query it by article_id
        if visited_articles["article_ids"][visited_articles["index"]] == 'null': # a raw page refresh can land on the 'null' sentinel; treat it as a "previous article" action
            visited_articles["index"] -= 1
            visited_articles["article_ids"].pop()
        article_id = visited_articles["article_ids"][visited_articles["index"]]
        result = get_article_by_id(article_id)
    random.shuffle(result)
    # Choose article according to reader's level
    logging.debug('* get_today_article(): start d1 = ... ')
    d1 = load_freq_history(user_word_list)
    d2 = load_freq_history(path_prefix + 'static/words_and_tests.p')
    logging.debug(' ... get_today_article(): get_difficulty_level_for_user() start')
    d3 = get_difficulty_level_for_user(d1, d2)
    logging.debug(' ... get_today_article(): done')
    d = None
    result_of_generate_article = "not found"
    d_user = load_freq_history(user_word_list)
    logging.debug('* get_today_article(): user_difficulty_level() start')
    user_level = user_difficulty_level(d_user, d3) # more consideration as user's behaviour is dynamic. Time factor should be considered.
    logging.debug('* get_today_article(): done')
    text_level = 0
    if visited_articles["index"] > len(visited_articles["article_ids"])-1: # generate a new article
        amount_of_visited_articles = len(visited_articles["article_ids"])
        amount_of_existing_articles = result.__len__()
        if amount_of_visited_articles == amount_of_existing_articles: # the user has already read every existing article
            result_of_generate_article = "had read all articles"
        else:
            for k in range(3): # try at most 3 rounds
                for reading in result:
                    text_level = text_difficulty_level(reading['text'], d3)
                    factor = random.gauss(0.8, 0.1) # a number drawn from a Gaussian distribution with a mean of 0.8 and a standard deviation of 0.1
                    if reading['article_id'] not in visited_articles["article_ids"] and within_range(text_level, user_level, (8.0 - user_level) * factor): # a never-shown article whose difficulty fits the user
                        d = reading
                        visited_articles["article_ids"].append(d['article_id']) # remember the newly chosen article id
                        result_of_generate_article = "found"
                        break
                if result_of_generate_article == "found": # exit the outer retry loop as soon as an article is found
                    break
            if result_of_generate_article != "found": # all articles read, or 3 rounds found nothing suitable: append the 'null' sentinel
                visited_articles["article_ids"].append('null')
    else: # re-show an article that was already read
        d = random.choice(result)
        text_level = text_difficulty_level(d['text'], d3)
        result_of_generate_article = "found"
    today_article = None
    if d:
        oxford_words = load_oxford_words(oxford_words_path)
        oxford_word_count, total_words = count_oxford_words(d['text'],oxford_words)
        ratio = calculate_ratio(oxford_word_count,total_words)
        today_article = {
            "user_level": '%4.1f' % user_level,
            "text_level": '%4.1f' % text_level,
            "date": d['date'],
            "article_title": get_article_title(d['text']),
            "article_body": get_article_body(d['text']),
            "source": d["source"],
            "question": get_question_part(d['question']),
            "answer": get_answer_part(d['question']),
            "ratio" : ratio
        }
    return visited_articles, today_article, result_of_generate_article
def load_freq_history(path):
    """Load a word-frequency record from *path*; {} when the file is absent."""
    if not os.path.exists(path):
        return {}
    return pickle_idea.load_record(path)
def within_range(x, y, r):
    """True iff x is strictly above y but by no more than r."""
    return y < x <= y + r
def get_question_part(s):
    """Extract the question section from an article's question text.

    Lines between the 'QUESTION' and 'ANSWER' markers are returned (each
    stripped); the 'QUESTION' marker line itself is kept, matching the
    stored article format.
    """
    collected = []
    in_question = False
    for raw in s.strip().split('\n'):
        line = raw.strip()
        if line == 'QUESTION':
            in_question = True
            collected.append(line)
        elif line == 'ANSWER':
            in_question = False
        elif in_question:
            collected.append(line)
    return '\n'.join(collected)
def get_answer_part(s):
    """Return the lines after the 'ANSWER' marker (marker excluded, each
    line stripped)."""
    collecting = False
    out = []
    for raw in s.strip().split('\n'):
        line = raw.strip()
        if line == 'ANSWER':
            collecting = True
        elif collecting:
            out.append(line)
    return '\n'.join(out)

128
Login.py Normal file
View File

@ -0,0 +1,128 @@
import hashlib
import string
from datetime import datetime, timedelta
import unicodedata
def md5(s):
    '''
    Return the hex MD5 digest of string *s* (UTF-8 encoded).

    :param s: plaintext string
    :return: 32-character hexadecimal digest
    '''
    return hashlib.md5(s.encode(encoding='utf-8')).hexdigest()
# Deployment path; the second assignment below overrides it for local runs.
path_prefix = '/var/www/wordfreq/wordfreq/'
path_prefix = './' # comment this line in deployment
def verify_user(username, password):
    """True iff *username* exists and *password* matches the stored hash.

    Passwords are stored as md5(username + password), so identical
    passwords of different users hash differently.
    """
    from model.user import get_user_by_username
    user = get_user_by_username(username)
    if user is None:
        return False
    return user.password == md5(username + password)
def add_user(username, password):
    """Create a new account valid for 30 days from today.

    The stored password is md5(username + password); dates use YYYYMMDD.
    """
    from model.user import insert_user
    now = datetime.now()
    insert_user(username=username,
                password=md5(username + password),
                start_date=now.strftime('%Y%m%d'),
                expiry_date=(now + timedelta(days=30)).strftime('%Y%m%d'))
def check_username_availability(username):
    """True iff no account with *username* exists yet."""
    from model.user import get_user_by_username
    return get_user_by_username(username) is None
def change_password(username, old_password, new_password):
    '''
    Change a user's password.

    :param username: account name
    :param old_password: current plaintext password
    :param new_password: new plaintext password
    :return: dict carrying either an 'error' or a 'success' message plus the
        username

    Bug fix: the new password is now stored as md5(username + new_password),
    matching how add_user() stores and verify_user() checks credentials.
    Previously the plaintext was written to the database, so the user could
    never log in again after changing their password.
    '''
    if not verify_user(username, old_password):  # wrong old password
        return {'error':'Old password is wrong.', 'username':username}
    if new_password == old_password:  # new password must differ from the old one
        return {'error':'New password cannot be the same as the old password.', 'username':username}
    from model.user import update_password_by_username
    # Hash username+password together so identical passwords of different
    # users do not produce identical stored hashes.
    update_password_by_username(username, md5(username + new_password))
    return {'success':'Password changed', 'username':username}
def get_expiry_date(username):
    """Return the account's expiry date as YYYYMMDD.

    Unknown users get '20191024' — presumably a past date so they read as
    expired; confirm against the callers.
    """
    from model.user import get_user_by_username
    user = get_user_by_username(username)
    return '20191024' if user is None else user.expiry_date
class UserName:
    """Validates a proposed user name against the site's rules."""

    def __init__(self, username):
        self.username = username

    def contains_chinese(self):
        """True if any character is a CJK unified ideograph.

        Bug fix: unicodedata.name(char) raises ValueError for characters
        that have no Unicode name (e.g. control characters), which crashed
        validation; passing a default of '' makes the check total.
        """
        for char in self.username:
            if unicodedata.name(char, '').startswith('CJK UNIFIED IDEOGRAPH'):
                return True
        return False

    def validate(self):
        """Return 'OK' if the user name is acceptable, else an explanation."""
        if len(self.username) > 20:
            return f'{self.username} is too long. The user name cannot exceed 20 characters.'
        if self.username.startswith('.'): # a user name must not start with a dot
            return 'Period (.) is not allowed as the first letter in the user name.'
        if ' ' in self.username: # a user name must not include a whitespace
            return 'Whitespace is not allowed in the user name.'
        for c in self.username: # no special characters, except non-leading periods or underscores
            if c in string.punctuation and c != '.' and c != '_':
                return f'{c} is not allowed in the user name.'
        if self.username in ['signup', 'login', 'logout', 'reset', 'mark', 'back', 'unfamiliar', 'familiar', 'del',
                             'admin']:
            return 'You used a restricted word as your user name. Please come up with a better one.'
        if self.contains_chinese():
            return 'Chinese characters are not allowed in the user name.'
        return 'OK'
class Password:
    """Validates a proposed password against the site's rules."""

    def __init__(self, password):
        self.password = password

    def contains_chinese(self):
        """True if any character is a CJK unified ideograph.

        Bug fix: unicodedata.name(char) raises ValueError for characters
        that have no Unicode name; the '' default keeps validation from
        crashing on such input.
        """
        for char in self.password:
            if unicodedata.name(char, '').startswith('CJK UNIFIED IDEOGRAPH'):
                return True
        return False

    def validate(self):
        """Return 'OK' if the password is acceptable, else an explanation."""
        if len(self.password) < 4:
            return 'Password must be at least 4 characters long.'
        if ' ' in self.password:
            return 'Password cannot contain spaces.'
        if self.contains_chinese():
            return 'Chinese characters are not allowed in the password.'
        return 'OK'
class WarningMessage:
    """Dispatches validation of a string to UserName or Password.

    str(WarningMessage(s, type)) yields the validator's message ('OK' when
    the value is acceptable).
    """

    def __init__(self, s, type='username'):
        self.s = s
        self.type = type

    def __str__(self):
        validators = {'username': UserName, 'password': Password}
        validator_cls = validators.get(self.type)
        if validator_cls is not None:
            return validator_cls(self.s).validate()

34
create_pickle.py Normal file
View File

@ -0,0 +1,34 @@
# Purpose: create a sample user word-history pickle for manual testing.
import pickle
import os
# Sample vocabulary data - simulating a user's word history
# Format: word -> list of dates when the word was studied
test_data = {
    "hello": ["20240101"],
    "world": ["20240101", "20240102"],
    "computer": ["20240101", "20240103"],
    "programming": ["20240102"],
    "python": ["20240102", "20240103"],
    "algorithm": ["20240103"],
    "database": ["20240103"],
    "interface": ["20240104"],
    "vocabulary": ["20240104"],
    "sophisticated": ["20240104"]
}
# Ensure frequency directory exists
# NOTE(review): hard-coded path to one developer's machine — adjust before
# running this script on another host.
base_path = r'C:\Users\ANNA\Desktop\app\static\frequency'
os.makedirs(base_path, exist_ok=True)
# Save the test data
file_path = os.path.join(base_path, 'mr1an85.pickle')
with open(file_path, 'wb') as f:
    pickle.dump(test_data, f)
print(f"Test file created at: {file_path}")
# Verify the file was created and can be read
with open(file_path, 'rb') as f:
    loaded_data = pickle.load(f)
print("\nVerifying data:")
print(loaded_data)

531
difficulty.py Normal file
View File

@ -0,0 +1,531 @@
###########################################################################
# Copyright 2019 (C) Hui Lan <hui.lan@cantab.net>
# Written permission must be obtained from the author for commercial uses.
###########################################################################
# Purpose: compute difficulty level of an English text (Refactored with OO Design)
import pickle
import math
from wordfreqCMD import remove_punctuation, freq, sort_in_descending_order, sort_in_ascending_order, map_percentages_to_levels
import snowballstemmer
import os
import string
class DifficultyEstimator:
    """
    Estimates the difficulty level of English words.

    Levels are pre-computed from a pickle mapping words to the tests they
    appear in, cached in ``word_difficulty_dict``; lookups fall back to the
    word's stem, then to level 0 for unknown words.
    """

    # (test tags, level) pairs, checked in priority order
    _LEVEL_RULES = (
        (('CET4',), 4),
        (('OXFORD3000',), 5),
        (('CET6', 'GRADUATE'), 6),
        (('OXFORD5000', 'IELTS'), 7),
        (('BBC',), 8),
    )

    def __init__(self, pickle_fname=None):
        """
        :param pickle_fname: optional path to the pickle file of word test data
        """
        self.word_difficulty_dict = {}  # word -> pre-computed difficulty level
        self.stemmer = snowballstemmer.stemmer('english')
        self.stop_words = {
            'the', 'and', 'of', 'to', 'what', 'in', 'there', 'when', 'them',
            'would', 'will', 'out', 'his', 'mr', 'that', 'up', 'more', 'your'
            # ... add other stop words ...
        }
        if pickle_fname:
            self._initialize_difficulty_levels(pickle_fname)

    def _initialize_difficulty_levels(self, pickle_fname):
        """Load the word-test pickle and pre-compute all difficulty levels."""
        try:
            with open(pickle_fname, 'rb') as f:
                self._compute_difficulty_levels(pickle.load(f))
        except FileNotFoundError:
            print(f"Warning: Could not find difficulty data file: {pickle_fname}")

    def _compute_difficulty_levels(self, word_data):
        """Assign each word the level of the first matching test tag."""
        for word, tests in word_data.items():
            for tags, level in self._LEVEL_RULES:
                if any(tag in tests for tag in tags):
                    self.word_difficulty_dict[word] = level
                    break

    def get_word_difficulty(self, word):
        """Return the difficulty level for *word*.

        Falls back to the word's stem, then to 0 for unknown words; the
        result is memoized in word_difficulty_dict.
        """
        cache = self.word_difficulty_dict
        if word not in cache:
            stem = self.stemmer.stemWord(word)
            cache[word] = cache.get(stem, 0)
        return cache[word]
def revert_dict(d):
    '''
    Invert a word-history dict.

    In d, word is the key and the value is a list of date strings (or, for
    backward compatibility, an int frequency).  In the returned dict d2, the
    date (truncated to hour resolution) is the key and the value is the list
    of words picked at that time.

    Bug fixes: a value of an unexpected type previously reused the date list
    of the preceding word (or raised NameError for the first word) — such
    entries are now skipped; the local variable formerly named `freq` no
    longer shadows the imported freq() function.
    '''
    d2 = {}
    for k in d:
        if type(d[k]) is list:  # d[k] is a list of dates
            lst = d[k]
        elif type(d[k]) is int:
            # Backward compatibility: d was like {'word': 1}, the int being
            # how often the word was added to the new-word book.
            count = d[k]
            lst = count * ['2021082019']  # arbitrary placeholder date chosen when this fallback was written
        else:
            continue  # unknown value type: skip instead of reusing stale data
        for time_info in lst:
            date = time_info[:10]  # truncate to hour resolution
            d2.setdefault(date, []).append(k)
    return d2
def user_difficulty_level(d_user, d, calc_func=0):
    '''
    Estimate the user's vocabulary level from their word history.

    :param d_user: word -> list of pick dates (the user's history)
    :param d: word -> difficulty level mapping
    :param calc_func: 0 (default) uses a weighted average over difficulty
        percentages; any other value uses a geometric mean of known-word levels
    :return: estimated level, never below the 4.5 default
    '''
    # Safety checks
    if not d_user or not d:
        return 4.5  # Return default level if either dictionary is empty
    try:
        if calc_func != 0:
            # calculation function 1: geometric mean (log-sum then exp)
            d_user2 = revert_dict(d_user)  # key is date, value is the list of words added on that date
            geometric = 0
            count = 0
            for date in sorted(d_user2.keys(),
                               reverse=True):  # most recently added words are more important
                lst = d_user2[date]  # a list of words
                lst2 = []  # a list of tuples, (word, difficulty level)
                for word in lst:
                    if word in d:
                        lst2.append((word, d[word]))
                lst3 = sort_in_ascending_order(lst2)  # easiest tuple first
                for t in lst3:
                    word = t[0]
                    hard = t[1]
                    if hard > 0:  # Prevent log(0)
                        geometric = geometric + math.log(hard)
                        count += 1
            return max(4.5, math.exp(geometric / max(count, 1)))
        # calculation function 2: weighted average
        d_user2 = revert_dict(d_user)  # key is date, value is the list of words added on that date
        count = {}  # number of words seen per difficulty level
        percentages = {}  # fraction of words per difficulty level
        total = 0  # total number of history words with a known level
        for date in d_user2.keys():
            lst = d_user2[date]  # a list of words
            for word in lst:
                if word in d:
                    if d[word] not in count:
                        count[d[word]] = 0
                    count[d[word]] += 1
                    total += 1
        if total == 0:
            return 4.5  # no word in the history has a known level
        for k in count.keys():
            percentages[k] = count[k] / total
        weight = map_percentages_to_levels(percentages)
        sum = 0  # NOTE(review): shadows the builtin `sum` inside this function
        for k in weight.keys():
            sum += weight[k] * k
        return max(4.5, sum)  # Ensure minimum level of 4.5
    except Exception as e:
        print(f"Error calculating user difficulty level: {str(e)}")
        return 4.5  # Return default level on error
def text_difficulty_level(s, d):
    """Estimate the difficulty of text *s* as the geometric mean of the
    levels of its (up to) 20 hardest known, non-stop words.

    :param s: article text
    :param d: word -> difficulty level mapping
    :return: geometric-mean difficulty (1.0 when no known word is present)
    """
    stop_words = {'the':1, 'and':1, 'of':1, 'to':1, 'what':1, 'in':1, 'there':1, 'when':1, 'them':1, 'would':1, 'will':1, 'out':1, 'his':1, 'mr':1, 'that':1, 'up':1, 'more':1, 'your':1, 'it':1, 'now':1, 'very':1, 'then':1, 'could':1, 'he':1, 'any':1, 'some':1, 'with':1, 'into':1, 'you':1, 'our':1, 'man':1, 'other':1, 'time':1, 'was':1, 'than':1, 'know':1, 'about':1, 'only':1, 'like':1, 'how':1, 'see':1, 'is':1, 'before':1, 'such':1, 'little':1, 'two':1, 'its':1, 'as':1, 'these':1, 'may':1, 'much':1, 'down':1, 'for':1, 'well':1, 'should':1, 'those':1, 'after':1, 'same':1, 'must':1, 'say':1, 'first':1, 'again':1, 'us':1, 'great':1, 'where':1, 'being':1, 'come':1, 'over':1, 'good':1, 'himself':1, 'am':1, 'never':1, 'on':1, 'old':1, 'here':1, 'way':1, 'at':1, 'go':1, 'upon':1, 'have':1, 'had':1, 'without':1, 'my':1, 'day':1, 'be':1, 'but':1, 'though':1, 'from':1, 'not':1, 'too':1, 'another':1, 'this':1, 'even':1, 'still':1, 'her':1, 'yet':1, 'under':1, 'by':1, 'let':1, 'just':1, 'all':1, 'because':1, 'we':1, 'always':1, 'off':1, 'yes':1, 'so':1, 'while':1, 'why':1, 'which':1, 'me':1, 'are':1, 'or':1, 'no':1, 'if':1, 'an':1, 'also':1, 'thus':1, 'who':1, 'cannot':1, 'she':1, 'whether':1} # ignore these words while computing the artile's difficulty level
    scored = []  # (word, difficulty level) pairs for known, non-stop words
    for entry in freq(remove_punctuation(s)):
        token = entry[0]
        if token not in stop_words and token in d:
            scored.append((token, d[token]))
    ranked = sort_in_descending_order(scored)  # most difficult words on top
    hardest = ranked[:20]  # only the n=20 most difficult words matter
    product = 1
    for _, level in hardest:
        product *= level
    return product ** (1 / max(len(hardest), 1))
def load_record(fname):
    """
    Load a pickled word-record file located relative to this script.

    'frequency.p' is looked up next to the script; any other name is looked
    up under the 'static' subfolder.

    :param fname: file name of the pickle
    :return: the unpickled dictionary, or {} (with a warning) when missing
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    parts = (fname,) if fname == 'frequency.p' else ('static', fname)
    path = os.path.join(script_dir, *parts)
    try:
        with open(path, 'rb') as f:
            return pickle.load(f)
    except FileNotFoundError:
        print(f"Warning: Could not find file: {path}")
        return {}
def get_difficulty_level_for_user(frequency_dict, word_test_dict):
    """
    Map each word in *word_test_dict* to a difficulty level based on which
    tests list it (CET4=4, OXFORD3000=5, CET6/GRADUATE=6, OXFORD5000/IELTS=7,
    BBC=8, otherwise the default 3).

    :param frequency_dict: word frequency data (not consulted here)
    :param word_test_dict: word -> collection of test tags
    :return: dict mapping words to their difficulty levels
    """
    rules = (
        (4, ('CET4',)),
        (5, ('OXFORD3000',)),
        (6, ('CET6', 'GRADUATE')),
        (7, ('OXFORD5000', 'IELTS')),
        (8, ('BBC',)),
    )
    levels = {}
    for word, tests in word_test_dict.items():
        for level, tags in rules:
            if any(tag in tests for tag in tags):
                levels[word] = level
                break
        else:
            levels[word] = 3  # default level
    return levels
class VocabularyLevelEstimator:
    """Estimates vocabulary levels from the Oxford CEFR word list.

    Word levels are read from a 'word pos LEVEL' text file and mapped
    A1=3 ... C1=7; unknown words score 0.
    """

    def __init__(self, word_data_path=None):
        """
        :param word_data_path: path to the Oxford word list; defaults to
            'db/oxford_words.txt'
        """
        self.word_levels = {}
        self.level_mapping = {'A1': 3, 'A2': 4, 'B1': 5, 'B2': 6, 'C1': 7}
        path = 'db/oxford_words.txt' if word_data_path is None else word_data_path
        if path:
            self._load_word_data(path)

    def _load_word_data(self, filepath):
        """Populate word_levels from the word list file; warn when missing."""
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                for line in f:
                    fields = line.strip().split()
                    if len(fields) < 3:
                        continue  # skip blank/malformed lines
                    code = fields[-1].strip()
                    if code in self.level_mapping:
                        self.word_levels[fields[0].strip().lower()] = self.level_mapping[code]
        except FileNotFoundError:
            print(f"Warning: Could not find difficulty data file: {filepath}")

    def get_word_level(self, word):
        """Return the level of a single word (0 when unknown or empty).

        :raises TypeError: if *word* is None or not a string
        """
        if word is None:
            raise TypeError("Word cannot be None")
        if not isinstance(word, str):
            raise TypeError("Word must be a string")
        return self.word_levels.get(word.lower(), 0) if word else 0

    def estimate_text_level(self, text):
        """Return the mean word level of *text* (3 for empty input).

        :raises TypeError: if *text* is None or not a string
        """
        if text is None:
            raise TypeError("Input text cannot be None")
        if not isinstance(text, str):
            raise TypeError("Input text must be a string")
        tokens = text.lower().split() if text else []
        if not tokens:
            return 3  # default level for empty input
        scores = [self.get_word_level(t) for t in tokens]
        return sum(scores) / len(scores)

    def estimate_user_level(self, word_history):
        """Return the mean level over the history's words (3 when empty).

        :raises TypeError: if *word_history* is None or not a dict
        :raises ValueError: if keys are not strings or values are not
            lists/ints
        """
        if word_history is None:
            raise TypeError("Word history cannot be None")
        if not isinstance(word_history, dict):
            raise TypeError("Word history must be a dictionary")
        for word, value in word_history.items():
            if not isinstance(word, str):
                raise ValueError("Word history keys must be strings")
            if not isinstance(value, (list, int)):
                raise ValueError("Word history values must be lists or integers")
        if not word_history:
            return 3  # default level for an empty history
        scores = [self.get_word_level(w) for w in word_history]
        return sum(scores) / len(scores)
class UserVocabularyLevel(VocabularyLevelEstimator):
def __init__(self, word_history, word_data_path=None):
if word_data_path is None:
word_data_path = 'db/oxford_words.txt'
super().__init__(word_data_path)
self.word_history = word_history
self._level = None
@property
def level(self):
if self._level is None:
if not self.word_history:
self._level = 0
return self._level
# Gather all (timestamp, word) pairs
word_times = []
for word, times in self.word_history.items():
for t in times:
word_times.append((t, word))
if not word_times:
self._level = 0
return self._level
# Sort by timestamp descending
word_times.sort(reverse=True)
recent_words = []
seen = set()
for t, word in word_times:
clean_word = word.strip(string.punctuation).lower()
if clean_word not in seen and self.is_valid_word(clean_word):
recent_words.append(clean_word)
seen.add(clean_word)
if len(recent_words) == 3:
break
if not recent_words:
self._level = 0
return self._level
levels = [self.get_word_level(word) for word in recent_words]
if all(l == 0 for l in levels):
self._level = 0
else:
self._level = max(levels) + 0.1 * (len(levels) - 1)
return self._level
def is_valid_word(self, word):
return word.isalpha()
class ArticleVocabularyLevel(VocabularyLevelEstimator):
def __init__(self, content, word_data_path=None):
if word_data_path is None:
word_data_path = 'db/oxford_words.txt'
super().__init__(word_data_path)
self.content = content
self._level = None
@property
def level(self):
if self._level is None:
if not self.content:
self._level = 0
return self._level
words = [w.strip(string.punctuation).lower() for w in self.content.split()]
words = [w for w in words if w and w.isalpha()]
if not words:
self._level = 0
return self._level
word_levels = [self.get_word_level(w) for w in words]
word_levels = [l for l in word_levels if l > 0]
if not word_levels:
self._level = 0
else:
if len(word_levels) == 1:
self._level = word_levels[0]
elif len(word_levels) <= 3:
avg = sum(word_levels) / len(word_levels)
# Add a small bonus for each extra word to ensure superset > subset
bonus = 0.01 * (len(word_levels) - 1)
self._level = max(avg, max(word_levels) + bonus)
else:
word_levels.sort(reverse=True)
hardest = word_levels[:10]
self._level = max(sum(hardest) / len(hardest), max(hardest) + 0.01 * (len(hardest) - 1))
return self._level
def is_valid_word(self, word):
return word.isalpha()
if __name__ == '__main__':
    # Manual smoke test: load the frequency/test data and score a text.
    d1 = load_record('frequency.p')
    # print(d1)
    d2 = load_record('words_and_tests.p')
    # print(d2)
    d3 = get_difficulty_level_for_user(d1, d2)
    # NOTE(review): each of the following assignments to `s` overwrites the
    # previous one — only the last value (or the file read below) is ever
    # scored; the earlier sample texts are dead code kept for experimenting.
    s = '''
South Lawn
11:53 A.M. EDT
THE PRESIDENT: Hi, everybody. Hi. How are you? So, the stock market is doing very well.
The economy is booming. We have a new record in sight. It could happen even today.
But we have a new stock market record. I think it'll be about 118 times that we've broken the record.
Jobs look phenomenal.
'''
    s = '''
By the authority vested in me as President by the Constitution and the laws of the United States, after carefully considering the reports submitted to the Congress by the Energy Information Administration, including the report submitted in October 2019, and other relevant factors, including global economic conditions, increased oil production by certain countries, the global level of spare petroleum production capacity, and the availability of strategic reserves, I determine, pursuant to section 1245(d)(4)(B) and (C) of the National Defense Authorization Act for Fiscal Year 2012, Public Law 112-81, and consistent with prior determinations, that there is a sufficient supply of petroleum and petroleum products from countries other than Iran to permit a significant reduction in the volume of petroleum and petroleum products purchased from Iran by or through foreign financial institutions.
'''
    s = '''
Democrats keep their witnesses locked behind secure doors, then flood the press with carefully sculpted leaks and accusations, driving the Trump-corruption narrative. And so the party goes, galloping toward an impeachment vote that would overturn the will of the American voterson a case built in secret.
Conservative commentators keep noting that Mrs. Pelosi's refusal to hold a vote on the House floor to authorize an official impeachment inquiry helps her caucus's vulnerable members evade accountability. But there's a more practical and uglier reason for Democrats to skip the formalities. Normally an authorization vote would be followed by official rules on how the inquiry would proceed. Under today's process, Mr. Schiff gets to make up the rules as he goes along. Behold the Lord High Impeacher.
Democrats view control over the narrative as essential, having learned from their Russia-collusion escapade the perils of transparency. They banked on special counsel Robert Mueller's investigation proving impeachment fodder, but got truth-bombed. Their subsequent open hearings on the subject—featuring Michael Cohen, Mr. Mueller and Corey Lewandowski —were, for the Democrats, embarrassing spectacles, at which Republicans punched gaping holes in their story line.
Mr. Schiff is making sure that doesn't happen again; he'll present the story, on his terms. His rules mean he can issue that controlling decree about "only one" transcript and Democratic staff supervision of Republican members. It means he can bar the public, the press and even fellow representatives from hearings, even though they're unclassified.
'''
    s = '''
Unemployment today is at a 50-year low. There are more Americans working today than ever before. Median household income in the last two and half years has risen by more than $5,000. And that doesn't even account for the savings from the President's tax cuts or energy reforms for working families.
Because of the President's policies, America has added trillions of dollars of wealth to our economy while China's economy continues to fall behind.
To level the playing field for the American worker against unethical trade practices, President Trump levied tariffs on $250 billion in Chinese goods in 2018. And earlier this year, the President announced we would place tariffs on another $300 billion of Chinese goods if significant issues in our trading relationship were not resolved by December of this year.
'''
    s = '''
Needless to say, we see it very differently. Despite the great power competition that is underway, and America's growing strength, we want better for China. That's why, for the first time in decades, under President Donald Trump's leadership, the United States is treating China's leaders exactly how the leaders of any great world power should be treated with respect, yes, but also with consistency and candor.
'''
    s = '''
Brexit is the scheduled withdrawal of the United Kingdom from the European Union. Following a June 2016 referendum, in which 51.9% voted to leave, the UK government formally announced the country's withdrawal in March 2017, starting a two-year process that was due to conclude with the UK withdrawing on 29 March 2019. As the UK parliament thrice voted against the negotiated withdrawal agreement, that deadline has been extended twice, and is currently 31 October 2019. The Benn Act, passed in September 2019, requires the government to seek a third extension.
'''
    s = '''
The argument for Brexit
According to the BBC, the push to leave the EU was advocated mostly by the UK Independence Party and was not supported by the Prime Minister, David Cameron. Members of the UK Independence Party argued that Britain's participation in the EU was a restrictive element for the country.
As one of the EU's primary initiatives is free movement within the region the party's main arguments centered around regaining border control and reclaiming business rights. In addition, supporters of Brexit cited the high EU membership fees as a negative aspect of participation in the EU. It was argued that if the UK separates itself from the EU, these fees can be used to benefit the UK.
The argument against Brexit
The Conservative Party and the Prime Minister were strongly in favor of remaining with the EU. As a result of the decision to discontinue its participation in the EU, the Prime Minister has made a public statement that he will be relinquishing his position. He believes that the country needs a leader with the same goals as the majority of the country. He has promised a new PM will be in place by early September.
The argument against Brexit pertains mostly to the business benefits. The argument is that the UK receives business benefits by being able to participate in the single market system established by the EU. In response to the criticism against the open borders, proponents believe that the influx of immigrants helps develop an eager workforce and fuels public service projects.
Leaders in favor of staying also worry about the political backlash that could possibly result from other countries who favored staying with the EU. In addition, proponents of remaining with the EU believe that being part of a wider community of nations provides economic and cultural strength, as well as an additional element of security.
What does Brexit mean for the future?
While the decision marked a huge statement for the UK, the referendum vote is not legally binding. There are still many hurdles that must be dealt with before Brexit can become a reality.
The UK is still subject to the laws of the EU until Britain's exit becomes legal. In order for the UK to make its break official, the country needs to invoke Article 50. It is unclear exactly what this process will entail or how long it will take as Britain is the first country to take its leave of the EU. Once Article 50 has been formally invoked, the UK has two years to negotiate its departure with the other member states. But according to the BBC, "Extricating the UK from the EU will be extremely complex, and the process could drag on longer than that."
Amidst the aftermath of this shocking referendum vote, there is great uncertainty as political leaders decide what this means for the UK.
'''
    s = '''
British Prime Minister Boris Johnson walks towards a voting station during the Brexit referendum in Britain, June 23, 2016. (Photo: EPA-EFE)
LONDON British Prime Minister Boris Johnson said Thursday he will likely ask Parliament to approve an election as part of an effort to break a Brexit deadlock.
It is not clear if the vote, which Johnson wants to hold on Dec. 12, will take place as opposition lawmakers must also back the move.
They are expected to vote on the measure on Monday.
Johnson's announcement comes ahead of an expected decision Friday from the European Union over whether to delay Britain's exit from the bloc for three months.
Britain's leader has been steadfastly opposed to any extension to the nation's scheduled Oct. 31 departure date from the EU, although in a letter to the leader of the opposition Labour Party this week he said he would accept a short technical postponement, "say to 15 or 30 November, to allow lawmakers to implement an EU withdrawal bill.
Johnson's decision to offer to call an election follows lawmakers' rejection of his plan to rush through an EU exit bill that runs to hundreds of pages in just three days. They want more time to scrutinize the legislation and to make sure it does not leave the door open to a possible "no-deal" Brexit during future exit negotiations with the EU that will run through next year. A "no-deal" Brexit could dramatically harm Britain's economy.
The prime minister was forced to ask for an extension to Britain's EU departure date after Britain's Parliament passed a law to ward off the threat of a "no-deal" Brexit.
Johnson has repeatedly pledged to finalize the first stage, a transition deal, of Britain's EU divorce battle by Oct. 31. A second stage will involve negotiating its future relationship with the EU on trade, security and other salient issues.
'''
    s = '''
Thank you very much. We have a Cabinet meeting. We'll have a few questions after grace. And, if you would, Ben, please do the honors.
THE PRESIDENT: All right, thank you, Ben. That was a great job. Appreciate it.
The economy is doing fantastically well. It's getting very close to another record. We've had many records since we won office. We're getting very close to another record. I don't know if anybody saw it: The household median income for eight years of President Bush, it rose $400. For eight years of President Obama, it rose $975. And for two and half years of President Trump they have it down as two and a half years it rose $5,000, not including $2,000 for taxes. So it rose, let's say, $7,000. So in two and a half years, we're up $7,000, compared to $1,000, compared to $400. And that's for eight years and eight years.
That's a number that just came out, but that's a number that I don't know how there could be any dispute or any — I've never heard a number like that, meaning the economy is doing fantastically well.
We need for our farmers, our manufacturers, for, frankly, unions and non-unions, we need USMCA to be voted on. If it's voted on, it'll pass. It's up to Nancy Pelosi to put it up. If she puts it up, it's going to pass. It's going to be very bipartisan. It's something that's very much needed. It'll be hundreds of thousands of jobs.
'''
    # Prefer scoring the Oxford word list itself if it is present on disk;
    # otherwise fall back to the last sample text above.
    try:
        base_path = os.path.join(os.path.dirname(__file__), 'db')
        file_path = os.path.join(base_path, 'oxford_words.txt')
        with open(file_path) as f:
            s = f.read()
    except FileNotFoundError:
        print("Warning: Could not find oxford_words.txt. Using sample text instead.")
        s = """Sample text here. Replace this with any default text you want to analyze."""
    print(text_difficulty_level(s, d3))
    # Exercise the OO estimators as well (results intentionally unused here).
    article = ArticleVocabularyLevel('source', word_data_path='db/oxford_words.txt')
    user = UserVocabularyLevel({'simple':['202408050930']}, word_data_path='db/oxford_words.txt')

101
pickle_idea.py Normal file
View File

@ -0,0 +1,101 @@
###########################################################################
# Copyright 2019 (C) Hui Lan <hui.lan@cantab.net>
# Written permission must be obtained from the author for commercial uses.
###########################################################################
# Purpose: dictionary & pickle as a simple means of database.
# Task: incorporate the functions into wordfreqCMD.py such that it will also show cumulative frequency.
import os
import pickle
from datetime import datetime
def lst2dict(lst, d):
    '''
    Accumulate the (word, value) pairs of lst into dictionary d in place.

    Each value may be an integer frequency or a list of date stamps; a
    date list contributes its length. Existing entries are incremented.
    '''
    for word, value in lst:
        count = len(value) if isinstance(value, list) else value
        d[word] = d.get(word, 0) + count
def dict2lst(d):
    '''Convert dict d to a list of (word, frequency) pairs; date lists become counts.'''
    if not d:
        return []
    sample = next(iter(d.values()))
    if isinstance(sample, list):
        return [(word, len(dates)) for word, dates in d.items()]
    return list(d.items())
def merge_frequency(lst1, lst2):
    '''Combine two (word, value) lists into a single frequency dictionary.'''
    merged = {}
    for pairs in (lst1, lst2):
        lst2dict(pairs, merged)
    return merged
def load_record(pickle_fname):
    '''Load and return the dictionary stored in the pickle file pickle_fname.'''
    with open(pickle_fname, 'rb') as handle:
        return pickle.load(handle)
def save_frequency_to_pickle(d, pickle_fname):
    '''
    Persist dictionary d to pickle_fname as word -> frequency counts.

    A value that is a list of date stamps is normalized to its length.
    Purely numeric "words" and single-character entries are dropped; the
    exclusion list is currently empty (no stop words filtered).

    :param d: dict mapping word -> int frequency or list of date stamps
    :param pickle_fname: destination pickle file name
    '''
    exclusion_lst = []  # no stop words excluded at the moment
    d2 = {}
    for k in d:
        if k not in exclusion_lst and not k.isnumeric() and len(k) > 1:
            # normalize: a list of date stamps becomes its count
            d2[k] = len(d[k]) if isinstance(d[k], list) else d[k]
    # context manager guarantees the handle is closed even if dump raises
    with open(pickle_fname, 'wb') as f:
        pickle.dump(d2, f)
def unfamiliar(path, word):
    '''
    Record one "unfamiliar" hit for word in the pickle database at path.

    Appends the current timestamp (YYYYMMDDHHMM) to the word's date list,
    creating the entry when the word is new (the original raised KeyError
    for unseen words). Returns None; a missing database file is a no-op.

    :param path: pickle file holding word -> list-of-dates
    :param word: the word being marked unfamiliar
    '''
    if not os.path.exists(path):
        return None
    with open(path, "rb") as f:
        dic = pickle.load(f)
    # setdefault guards against KeyError for words not yet recorded
    dic.setdefault(word, [])
    dic[word] += [datetime.now().strftime('%Y%m%d%H%M')]
    with open(path, "wb") as fp:
        pickle.dump(dic, fp)
def familiar(path, word):
    '''
    Record one "familiar" event for word in the pickle database at path.

    Removes the oldest recorded date for the word; when only one date is
    left the word is deleted entirely. Raises KeyError when the word is
    absent (unchanged contract). File handles are now closed via context
    managers (the original leaked both handles).

    :param path: pickle file holding word -> list-of-dates
    :param word: the word being marked familiar
    '''
    with open(path, "rb") as f:
        dic = pickle.load(f)
    if len(dic[word]) > 1:
        del dic[word][0]  # drop the oldest timestamp
    else:
        dic.pop(word)  # last timestamp gone: forget the word
    with open(path, "wb") as fp:
        pickle.dump(dic, fp)
if __name__ == '__main__':
    # Smoke test: build a frequency dict, persist it, reload, and merge new counts.
    lst1 = [('apple',2), ('banana',1)]
    d = {}
    lst2dict(lst1, d) # d will change
    save_frequency_to_pickle(d, 'frequency.p') # frequency.p is our database
    lst2 = [('banana',2), ('orange', 4)]
    d = load_record('frequency.p')
    lst1 = dict2lst(d)
    d = merge_frequency(lst2, lst1)
    print(d)

99
pickle_idea2.py Normal file
View File

@ -0,0 +1,99 @@
###########################################################################
# Copyright 2019 (C) Hui Lan <hui.lan@cantab.net>
# Written permission must be obtained from the author for commercial uses.
###########################################################################
# Purpose: dictionary & pickle as a simple means of database.
# Task: incorporate the functions into wordfreqCMD.py such that it will also show cumulative frequency.
# Note: unlike pick_idea.py, now the second item is not frequency, but a list of dates.
import pickle
from datetime import datetime
def lst2dict(lst, d):
    '''
    Fold the (word, value) pairs from lst into dictionary d in place.

    A value that is a list of date stamps contributes its length; an
    integer value contributes itself. Repeated words accumulate.
    '''
    for entry in lst:
        token, payload = entry[0], entry[1]
        increment = len(payload) if isinstance(payload, list) else payload
        if token in d:
            d[token] += increment
        else:
            d[token] = increment
def deleteRecord(path, word):
    '''
    Remove word from the pickle database at path and rewrite the file.

    Prints "sorry" (and still rewrites the file) when the word is absent.
    '''
    with open(path, 'rb') as f:
        db = pickle.load(f)
    try:
        del db[word]
    except KeyError:
        print("sorry")
    with open(path, 'wb') as ff:
        pickle.dump(db, ff)
def dict2lst(d):
    '''
    Turn dict d into a list of (word, frequency) pairs.

    Integer values pass through; date lists are converted to counts.
    An empty dict (or unrecognized value type) yields an empty list.
    '''
    if not d:
        return []
    first_value = d[next(iter(d))]
    if isinstance(first_value, int):
        return list(d.items())
    if isinstance(first_value, list):
        return [(w, len(dates)) for w, dates in d.items()]
    return []
def merge_frequency(lst1, lst2):
    '''Merge two (word, value) lists into one cumulative frequency dictionary.'''
    combined = {}
    lst2dict(lst1, combined)
    lst2dict(lst2, combined)
    return combined
def load_record(pickle_fname):
    '''Return the dictionary pickled in pickle_fname.'''
    with open(pickle_fname, 'rb') as source:
        record = pickle.load(source)
    return record
def save_frequency_to_pickle(d, pickle_fname):
    '''
    Save d to pickle_fname as word -> count.

    Words in the module-level exclusion_lst, purely numeric tokens and
    single-character strings are skipped; date lists become counts.
    '''
    filtered = {}
    for word, value in d.items():
        if word in exclusion_lst or word.isnumeric() or len(word) < 2:
            continue
        filtered[word] = len(value) if isinstance(value, list) else value
    with open(pickle_fname, 'wb') as f:
        pickle.dump(filtered, f)
exclusion_lst = ['one', 'no', 'has', 'had', 'do', 'that', 'have', 'by', 'not', 'but', 'we', 'this', 'my', 'him', 'so', 'or', 'as', 'are', 'it', 'from', 'with', 'be', 'can', 'for', 'an', 'if', 'who', 'whom', 'whose', 'which', 'the', 'to', 'a', 'of', 'and', 'you', 'i', 'he', 'she', 'they', 'me', 'was', 'were', 'is', 'in', 'at', 'on', 'their', 'his', 'her', 's', 'said', 'all', 'did', 'been', 'w']
if __name__ == '__main__':
    # Smoke tests for the date-list flavour of the frequency database.
    # Test 1: Convert dates to frequencies
    lst1 = [('apple',['201910251437', '201910251438']), ('banana',['201910251439'])]
    d = {}
    lst2dict(lst1, d)
    print("Test 1 - Convert dates to frequencies:")
    print(d) # Should show: {'apple': 2, 'banana': 1}
    # Test 2: Save and load frequencies
    save_frequency_to_pickle(d, 'frequency.p')
    loaded_d = load_record('frequency.p')
    print("\nTest 2 - Load saved frequencies:")
    print(loaded_d) # Should match the previous output
    # Test 3: Merge frequencies
    lst2 = [('banana',['201910251439']), ('orange', ['201910251440', '201910251439'])]
    lst1 = dict2lst(loaded_d)
    merged_d = merge_frequency(lst2, lst1)
    print("\nTest 3 - Merge frequencies:")
    print(merged_d) # Should show banana with increased frequency

108
test_estimator.py Normal file
View File

@ -0,0 +1,108 @@
import pytest
from difficulty import VocabularyLevelEstimator
@pytest.fixture
def estimator():
    """Fixture to create a VocabularyLevelEstimator instance"""
    # NOTE(review): placeholder path — every test below will fail until this
    # points at a real word-difficulty pickle file; confirm the intended path.
    return VocabularyLevelEstimator('path/to/your/actual/word_data.p')
class TestVocabularyLevelEstimator:
    # Test suite for VocabularyLevelEstimator covering normal, boundary and
    # abnormal inputs. NOTE(review): the expected defaults (level 3) and the
    # 3-8 range encode assumptions about the estimator's contract — confirm
    # against the difficulty module before treating failures as regressions.
    # Normal input tests
    def test_normal_text_estimation(self, estimator):
        """Test text level estimation with normal English text"""
        text = """The quick brown fox jumps over the lazy dog.
        This text contains common English words that
        should be processed without any issues."""
        level = estimator.estimate_text_level(text)
        assert isinstance(level, float)
        assert 3 <= level <= 8  # Difficulty levels should be between 3-8
    def test_normal_user_level(self, estimator):
        """Test user level estimation with normal word history"""
        # word -> list of date stamps on which the word was marked
        word_history = {
            'algorithm': ['20240101'],
            'computer': ['20240101', '20240102'],
            'programming': ['20240101']
        }
        level = estimator.estimate_user_level(word_history)
        assert isinstance(level, float)
        assert 3 <= level <= 8
    def test_normal_word_level(self, estimator):
        """Test word level estimation with common words"""
        assert estimator.get_word_level('computer') >= 3
        assert estimator.get_word_level('algorithm') >= 3
    # Boundary input tests
    def test_empty_text(self, estimator):
        """Test behavior with empty text"""
        assert estimator.estimate_text_level('') == 3  # Default level
    def test_single_word_text(self, estimator):
        """Test behavior with single-word text"""
        assert isinstance(estimator.estimate_text_level('Hello'), float)
    def test_empty_user_history(self, estimator):
        """Test behavior with empty user history"""
        assert estimator.estimate_user_level({}) == 3  # Default level
    def test_maximum_word_length(self, estimator):
        """Test behavior with extremely long word"""
        long_word = 'a' * 100
        assert estimator.get_word_level(long_word) == 3  # Default level
    # Abnormal input tests
    def test_non_english_text(self, estimator):
        """Test behavior with non-English text"""
        chinese_text = "这是中文文本"
        assert estimator.estimate_text_level(chinese_text) == 3  # Default level
    def test_special_characters(self, estimator):
        """Test behavior with special characters"""
        special_chars = "@#$%^&*()"
        assert estimator.estimate_text_level(special_chars) == 3  # Default level
    def test_invalid_word_history(self, estimator):
        """Test behavior with invalid word history format"""
        invalid_history = {'word': 'not_a_list'}
        with pytest.raises(ValueError):
            estimator.estimate_user_level(invalid_history)
    def test_none_input(self, estimator):
        """Test behavior with None input"""
        with pytest.raises(TypeError):
            estimator.estimate_text_level(None)
        with pytest.raises(TypeError):
            estimator.estimate_user_level(None)
        with pytest.raises(TypeError):
            estimator.get_word_level(None)
    # Edge cases
    def test_mixed_case_words(self, estimator):
        """Test behavior with mixed case words"""
        assert estimator.get_word_level('Computer') == estimator.get_word_level('computer')
    def test_whitespace_handling(self, estimator):
        """Test behavior with various whitespace patterns"""
        text_with_spaces = "  Multiple   Spaces   Between    Words  "
        level = estimator.estimate_text_level(text_with_spaces)
        assert isinstance(level, float)
    def test_repeated_words(self, estimator):
        """Test behavior with repeated words"""
        text = "word word word word word"
        level = estimator.estimate_text_level(text)
        assert isinstance(level, float)
    def test_numeric_input(self, estimator):
        """Test behavior with numeric input"""
        assert estimator.estimate_text_level("123 456 789") == 3  # Default level
    def test_mixed_content(self, estimator):
        """Test behavior with mixed content (numbers, words, special chars)"""
        mixed_text = "Hello123 @World! 456"
        level = estimator.estimate_text_level(mixed_text)
        assert isinstance(level, float)

216
user_service.py Normal file
View File

@ -0,0 +1,216 @@
from datetime import datetime
from admin_service import ADMIN_NAME
from flask import *
# from app import Yaml
# from app.Article import get_today_article, load_freq_history
# from app.WordFreq import WordFreq
# from app.wordfreqCMD import sort_in_descending_order
import Yaml
from Article import get_today_article, load_freq_history
from WordFreq import WordFreq
from wordfreqCMD import sort_in_descending_order
import pickle_idea
import pickle_idea2
import logging
# Log to log.txt with timestamps; DEBUG level captures the route tracing below.
logging.basicConfig(filename='log.txt', format='%(asctime)s %(message)s', level=logging.DEBUG)
# Blueprint carrying all user-facing routes; registered by the app elsewhere.
userService = Blueprint("user_bp", __name__)
path_prefix = '/var/www/wordfreq/wordfreq/'
path_prefix = './' # comment this line in deployment
@userService.route("/get_next_article/<username>", methods=['GET', 'POST'])
def get_next_article(username):
    '''
    Serve the next article for the given user as JSON.

    Advances the per-session visited-article cursor (or drops a trailing
    "null" placeholder left by a failed generation), then asks
    get_today_article() for the article to display.

    :param username: user whose frequency record seeds article selection
    :return: JSON with visited_articles, today_article and the generation result
    '''
    user_freq_record = path_prefix + 'static/frequency/' + 'frequency_%s.pickle' % (username)
    session['old_articleID'] = session.get('articleID')
    if request.method == 'GET':
        visited_articles = session.get("visited_articles")
        # Guard against a missing/empty history (first visit): the original
        # crashed on visited_articles['article_ids'][-1] in that case.
        if visited_articles and visited_articles.get('article_ids'):
            if visited_articles['article_ids'][-1] == "null":
                # "null" placeholder means no article was generated last time:
                # drop it instead of advancing the cursor
                visited_articles['article_ids'].pop()
            else:
                visited_articles["index"] += 1
            session["visited_articles"] = visited_articles
        # fixed typos in the log messages ('arcile' -> 'article')
        logging.debug('/get_next_article: start calling get_today_article()')
        visited_articles, today_article, result_of_generate_article = get_today_article(user_freq_record, session.get('visited_articles'))
        logging.debug('/get_next_article: done.')
        data = {
            'visited_articles': visited_articles,
            'today_article': today_article,
            'result_of_generate_article': result_of_generate_article
        }
    else:
        return 'Under construction'
    return json.dumps(data)
@userService.route("/get_pre_article/<username>",methods=['GET'])
def get_pre_article(username):
    '''
    Serve the previously visited article for the given user as JSON.

    Steps the session's visited-article cursor back by one and re-renders
    that article; returns an empty payload when already at the oldest entry.

    :param username: user whose frequency record seeds article rendering
    :return: JSON with visited_articles, today_article and the generation result
    '''
    user_freq_record = path_prefix + 'static/frequency/' + 'frequency_%s.pickle' % (username)
    if request.method == 'GET':
        visited_articles = session.get("visited_articles")
        if(visited_articles["index"]==0):
            # already at the first article: nothing earlier to show
            data=''
        else:
            visited_articles["index"] -= 1 # previous article: step the cursor back
            if visited_articles['article_ids'][-1] == "null": # drop a trailing "null" placeholder
                visited_articles['article_ids'].pop()
            session["visited_articles"] = visited_articles
            visited_articles, today_article, result_of_generate_article = get_today_article(user_freq_record, session.get('visited_articles'))
            data = {
                'visited_articles': visited_articles,
                'today_article': today_article,
                'result_of_generate_article':result_of_generate_article
            }
    return json.dumps(data)
@userService.route("/<username>/<word>/unfamiliar", methods=['GET', 'POST'])
def unfamiliar(username, word):
    '''
    Mark word as unfamiliar for username: append a timestamp to the word's
    record in the user's frequency pickle.

    :param username: user whose record is updated
    :param word: the word being marked
    :return: "success" literal consumed by the front-end AJAX call
    '''
    user_freq_record = path_prefix + 'static/frequency/' + 'frequency_%s.pickle' % (username)
    pickle_idea.unfamiliar(user_freq_record, word)
    session['thisWord'] = word # 1. put a word into session
    session['time'] = 1
    return "success"
@userService.route("/<username>/<word>/familiar", methods=['GET', 'POST'])
def familiar(username, word):
    '''
    Mark word as familiar for username: drop the oldest timestamp from the
    word's record (the word is removed entirely when none remain).

    :param username: user whose record is updated
    :param word: the word being marked
    :return: "success" literal consumed by the front-end AJAX call
    '''
    user_freq_record = path_prefix + 'static/frequency/' + 'frequency_%s.pickle' % (username)
    pickle_idea.familiar(user_freq_record, word)
    session['thisWord'] = word # 1. put a word into session
    session['time'] = 1
    return "success"
@userService.route("/<username>/<word>/del", methods=['GET', 'POST'])
def deleteword(username, word):
    '''
    Delete a word from the user's word list.

    :param username: user name
    :param word: word to delete
    :return: "success" literal consumed by the front-end AJAX call
    '''
    user_freq_record = path_prefix + 'static/frequency/' + 'frequency_%s.pickle' % (username)
    pickle_idea2.deleteRecord(user_freq_record, word)
    # Deletion in userpage_get.html runs asynchronously while flashed messages
    # render synchronously, so flashing here is disabled: an unread flash would
    # later leak into signup.html (bug repro: delete a word, log out, open the
    # sign-up page and the message appears).
    # flash(f'{word} is no longer in your word list.')
    return "success"
@userService.route("/<username>/userpage", methods=['GET', 'POST'])
def userpage(username):
    '''
    Render the user's main page.

    POST: analyze the submitted text and show its word frequencies.
    GET: show the user's word list together with today's article.

    :param username: user name from the URL (overridden by the session value)
    :return: a rendered template
    '''
    # Not logged in: show the not-logged-in page.
    if not session.get('logged_in'):
        return render_template('not_login.html')
    # Expired account: show the expiry page.
    user_expiry_date = session.get('expiry_date')
    if datetime.now().strftime('%Y%m%d') > user_expiry_date:
        return render_template('expiry.html', expiry_date=user_expiry_date)
    # Trust the username stored in the session, not the URL parameter.
    username = session.get('username')
    user_freq_record = path_prefix + 'static/frequency/' + 'frequency_%s.pickle' % (username)
    if request.method == 'POST': # when we submit a form
        content = request.form['content']
        f = WordFreq(content)
        lst = f.get_freq()
        return render_template('userpage_post.html',username=username,lst = lst, yml=Yaml.yml)
    elif request.method == 'GET': # when we load a html page
        try:
            d = load_freq_history(user_freq_record)
            lst = pickle_idea2.dict2lst(d)
            # Normalize every record to a (word, count) pair regardless of how
            # the value was stored (date list, plain int, or anything else).
            lst2 = []
            for t in lst:
                if isinstance(t[1], (list, tuple)): # Check if t[1] is a list or tuple
                    lst2.append((t[0], len(t[1])))
                elif isinstance(t[1], int): # Handle case where t[1] is an integer
                    lst2.append((t[0], t[1]))
                else:
                    lst2.append((t[0], 1)) # Default case
            lst3 = sort_in_descending_order(lst2)
            words = ''
            for x in lst3:
                words += x[0] + ' '
            visited_articles, today_article, result_of_generate_article = get_today_article(user_freq_record, session.get('visited_articles'))
            session['visited_articles'] = visited_articles
            # today_article drives the article panel on the front-end page
            return render_template('userpage_get.html',
                                   admin_name=ADMIN_NAME,
                                   username=username,
                                   session=session,
                                   # flashed_messages removed: only word deletion used flash, and deletion is asynchronous while flash rendering is synchronous
                                   today_article=today_article,
                                   result_of_generate_article=result_of_generate_article,
                                   d_len=len(d),
                                   lst3=lst3,
                                   yml=Yaml.yml,
                                   words=words)
        except Exception as e:
            # Fall back to an empty page rather than a 500 on any failure.
            print(f"Error in userpage: {str(e)}")
            return render_template('userpage_get.html',
                                   username=username,
                                   today_article={"user_level": 4.5}, # Default level
                                   lst3=[],
                                   d_len=0)
@userService.route("/<username>/mark", methods=['GET', 'POST'])
def user_mark_word(username):
    '''
    Save the words the user marked as difficult.

    Each marked word is stored with the current timestamp in the user's
    frequency pickle, then the user is redirected back to the user page.

    :param username: user name from the URL (overridden by the session value)
    :return: redirect to the user page (POST), placeholder text otherwise
    '''
    # Bug fix: read the session entry stored under the 'username' key — the
    # original session[username] used the URL value itself as the key (cf.
    # userpage above) and raised KeyError.
    username = session['username']
    user_freq_record = path_prefix + 'static/frequency/' + 'frequency_%s.pickle' % (username)
    if request.method == 'POST':
        # merge the newly marked words into the stored history
        d = load_freq_history(user_freq_record)
        lst_history = pickle_idea2.dict2lst(d)
        lst = []
        lst2 = []
        for word in request.form.getlist('marked'):
            # skip stop words and very short tokens
            if not word in pickle_idea2.exclusion_lst and len(word) > 2:
                lst.append((word, [get_time()]))
                lst2.append(word)
        d = pickle_idea2.merge_frequency(lst, lst_history)
        if len(lst_history) > 999:
            flash('You have way too many words in your difficult-words book. Delete some first.')
        else:
            pickle_idea2.save_frequency_to_pickle(d, user_freq_record)
            flash('Added %s.' % ', '.join(lst2))
        return redirect(url_for('user_bp.userpage', username=username))
    else:
        return 'Under construction'
def get_time():
    '''Return the current local time as a 12-digit string, YYYYMMDDHHMM (minute precision).'''
    now = datetime.now()
    return now.strftime('%Y%m%d%H%M')

201
wordfreqCMD.py Normal file
View File

@ -0,0 +1,201 @@
###########################################################################
# Copyright 2019 (C) Hui Lan <hui.lan@cantab.net>
# Written permission must be obtained from the author for commercial uses.
###########################################################################
import collections
import html
import string
import operator
import os, sys # 引入模块sys因为我要用里面的sys.argv列表中的信息来读取命令行参数。
import pickle_idea
import pickle
from datetime import datetime
from pickle_idea2 import load_record, save_frequency_to_pickle, lst2dict, dict2lst
def map_percentages_to_levels(percentages):
    '''
    Compute normalized difficulty weights from a difficulty distribution.

    Each difficulty level k (3..8) gets the raw weight (10 - k) * percentages[k]
    — easier levels weigh more — and the weights are then normalized so they
    sum to 1.

    :param percentages: dict mapping difficulty level (3..8) -> share of words
                        at that level
    :return: dict mapping difficulty level -> normalized weight
    '''
    sorted_keys = sorted(percentages.keys())
    levels_proportions = {}
    # running sum of raw weights for normalization; named 'total' so the
    # builtin sum() is not shadowed (the original used 'sum' as a variable)
    total = 0
    for k in sorted_keys:
        levels_proportions[k] = (10 - k) * percentages[k]
        total += levels_proportions[k]
    # normalize so the weights sum to 1
    for k in sorted_keys:
        levels_proportions[k] /= total
    return levels_proportions
def freq(fruit):
    '''
    Count word frequencies in the string fruit.

    The text is lower-cased first so "Apple" and "apple" count together,
    then split on whitespace.

    :return: list of (word, count) tuples, most frequent first,
             e.g. [('apple', 2), ('banana', 1)]
    '''
    tokens = fruit.lower().split()
    return collections.Counter(tokens).most_common()
def youdao_link(s):
    '''Return the Youdao dictionary URL for the word s.'''
    return 'http://youdao.com/w/eng/' + s + '/#keyfrom=dict2.index'
def file2str(fname):
    '''
    Read the whole file fname and return its contents as one string.

    Uses a context manager so the handle is closed even if reading raises
    (the original leaked the handle on error).
    '''
    with open(fname) as f:
        return f.read()
def remove_punctuation(s):
    '''
    Replace punctuation and special characters in s with spaces.

    HTML entities are unescaped first (so "&lt;" becomes "<" and is then
    stripped). A straight apostrophe is kept only when it sits between two
    ASCII letters (as in "don't"); all other apostrophes are dropped.
    '''
    special_characters = '\_©~<=>+/[]*&$%^@.,?!:;#()"“”—‘’{}|,。?!¥……()、《》【】:;·'
    s = html.unescape(s)
    for ch in special_characters:
        s = s.replace(ch, ' ')  # a space, so adjacent words do not merge
    s = s.replace('--', ' ')
    s = s.strip()
    if '\'' not in s:
        return s
    kept = []
    n = len(s)
    for i, ch in enumerate(s):
        if ch != '\'':
            kept.append(ch)
            continue
        # keep the apostrophe only when flanked by ASCII letters
        inside = i - 1 >= 0 and i + 1 < n
        if inside and s[i - 1] in string.ascii_letters and s[i + 1] in string.ascii_letters:
            kept.append(ch)
    return ''.join(kept)
def sort_in_descending_order(lst):
    '''Sort (word, frequency) pairs by frequency, then word, in descending order.'''
    return sorted(lst, key=lambda pair: (pair[1], pair[0]), reverse=True)
def sort_in_ascending_order(lst):
    '''Sort (word, frequency) pairs by frequency, then word, in ascending order.'''
    return sorted(lst, key=lambda pair: (pair[1], pair[0]))
def make_html_page(lst, fname):
    '''
    Write the (word, frequency) pairs in lst to fname as a simple HTML page.

    Each entry becomes a numbered paragraph whose word links to its Youdao
    dictionary page. Only called from the command-line main below. Uses a
    context manager and a single join (the original left the handle open on
    error and built the string with repeated +=).
    '''
    paragraphs = []
    for count, (word, frequency) in enumerate(lst, start=1):
        paragraphs.append('<p>%d <a href="%s">%s</a> (%d)</p>' % (count, youdao_link(word), word, frequency))
    with open(fname, 'w') as f:
        f.write(''.join(paragraphs))
class WordFreq:
    # NOTE(review): this class reads like a partially merged stub — process_file
    # references self.freq, which is never initialized anywhere visible here,
    # and both methods contain "... existing code ..." placeholders. Confirm
    # against the teacher project before relying on it.
    def __init__(self):
        # Pickle file holding cumulative word -> [timestamps] data across runs.
        self.pickle_file = 'frequency.p' # Add this to store cumulative data
    def process_file(self, filename):
        # ... existing word processing code ...
        # Convert current word frequencies to timestamp format
        current_words = {}
        timestamp = datetime.now().strftime('%Y%m%d%H%M')
        # assumes self.freq maps word -> int count, set by the elided code above — TODO confirm
        for word, freq in self.freq.items():
            current_words[word] = [timestamp] * freq # Create list of timestamps for each occurrence
        # Load existing cumulative data
        try:
            cumulative_data = load_record(self.pickle_file)
        except (FileNotFoundError, EOFError):
            cumulative_data = {}
        # Merge current words with historical data
        for word, timestamps in current_words.items():
            if word in cumulative_data:
                cumulative_data[word].extend(timestamps)
            else:
                cumulative_data[word] = timestamps
        # Save updated data
        save_frequency_to_pickle(cumulative_data, self.pickle_file)
    def show_results(self):
        # ... existing code ...
        # Add cumulative frequency display
        print("\nCumulative Frequencies (all-time):")
        try:
            cumulative_data = load_record(self.pickle_file)
            # Sort by cumulative frequency (length of timestamp list)
            sorted_words = sorted(cumulative_data.items(),
                                  key=lambda x: len(x[1]),
                                  reverse=True)
            for word, timestamps in sorted_words[:20]: # Show top 20
                print(f"{word}: {len(timestamps)} times")
        except (FileNotFoundError, EOFError):
            print("No cumulative data available yet")
## main entry point: word-frequency command-line tool
if __name__ == '__main__':
    num = len(sys.argv)
    if num == 1: # read the text from standard input
        s = input()
    elif num == 2: # read the text from the file named on the command line
        fname = sys.argv[1]
        s = file2str(fname)
    else:
        print('I can accept at most 2 arguments.')
        sys.exit() # stop here; nothing below runs
    s = remove_punctuation(s) # strip punctuation before counting
    L = freq(s)
    # print word, count and dictionary link, most frequent first
    for x in sort_in_descending_order(L):
        print('%s\t%d\t%s' % (x[0], x[1], youdao_link(x[0])))
    # dump the frequencies to result.html
    make_html_page(sort_in_descending_order(L), 'result.html')
    print('\nHistory:\n')
    if os.path.exists('frequency.p'):
        d = pickle_idea.load_record('frequency.p')
    else:
        d = {}
    print(sort_in_descending_order(pickle_idea.dict2lst(d)))
    # merge this run's counts into the cumulative database
    lst_history = pickle_idea.dict2lst(d)
    d = pickle_idea.merge_frequency(L, lst_history)
    pickle_idea.save_frequency_to_pickle(d, 'frequency.p')