feat(desktop): ✨ add user reading persona and global search
1. Implement the user reading persona
2. Implement global search
src/main/services/persona.service.ts | 100 (new file)
@@ -0,0 +1,100 @@
import { Repository } from 'typeorm'
import { ReadingPersona } from '@main/db/entities/ReadingPersona'
import {
  IReadingReflectionTaskBatch,
  IReadingReflectionTaskItem
} from '@shared/types/IReadingReflectionTask'
import { IUserReadingPersona } from '@shared/types/IUserReadingPersona'

export class PersonaService {
  constructor(private personaRepo: Repository<ReadingPersona>) {}

  /**
   * Recalculate the reading persona and persist it to the database.
   */
  async refreshPersona(
    items: IReadingReflectionTaskItem[],
    batches: IReadingReflectionTaskBatch[]
  ) {
    const rawResult = await this.calculatePersona(items, batches)

    const persona = new ReadingPersona()
    persona.id = 'current_user_persona' // single row for the current user
    persona.cognition = rawResult.cognition
    persona.breadth = rawResult.breadth
    persona.practicality = rawResult.practicality
    persona.output = rawResult.output
    persona.global = rawResult.global
    persona.topKeywords = JSON.stringify(rawResult.topKeywords)

    // Persist the full stats structure so the renderer can consume it directly
    persona.rawStats = {
      totalWords: items.reduce((sum, i) => sum + (i.content?.length || 0), 0),
      totalBooks: batches.length,
      topKeywords: rawResult.topKeywords
    }

    return await this.personaRepo.save(persona)
  }

  /**
   * Aggregate the given reflection items and batches into persona scores.
   */
  async calculatePersona(
    items: IReadingReflectionTaskItem[],
    batches: IReadingReflectionTaskBatch[]
  ) {
    // 1. Cognitive depth: based on keyword frequency
    const allKeywords = items.flatMap((i) => i.keywords || [])
    const keywordMap = new Map<string, number>()
    allKeywords.forEach((k) => keywordMap.set(k, (keywordMap.get(k) || 0) + 1))

    // Heuristic: more distinct keywords and more repetition yield a higher score (sample algorithm)
    const cognitionScore = Math.min(100, keywordMap.size * 2 + allKeywords.length / 5)

    // 2. Knowledge breadth: based on the number of books
    const breadthScore = Math.min(100, batches.length * 10)

    // 3. Output efficiency: based on total character count
    const totalWords = items.reduce((sum, i) => sum + (i.content?.length || 0), 0)
    const outputScore = Math.min(100, totalWords / 500) // full score at 50,000 characters

    // 4. Top 10 keywords by frequency
    const sortedKeywords = [...keywordMap.entries()]
      .sort((a, b) => b[1] - a[1])
      .slice(0, 10)
      .map((entry) => entry[0])

    return {
      cognition: Math.round(cognitionScore),
      breadth: Math.round(breadthScore),
      output: Math.round(outputScore),
      practicality: 75, // could be derived from the occupation distribution
      global: 60, // could be derived from the language distribution
      topKeywords: sortedKeywords
    }
  }
}

/**
 * Convert a persisted entity into the user reading persona DTO.
 * @param entity the ReadingPersona entity
 */
export function entityToUserReadingPersona(entity: ReadingPersona): IUserReadingPersona {
  return {
    domainDepth: JSON.parse(entity.topKeywords || '[]').map((name: string) => ({
      name,
      score: entity.cognition, // simplified: reuse the cognitive-depth score
      bookCount: 1 // could be refined with detailed per-book statistics
    })),
    breadthScore: entity.breadth,
    efficiencyScore: entity.output,
    maturityScore: entity.practicality,
    languageScore: entity.global,
    stats: entity.rawStats || {
      totalWords: 0,
      totalBooks: 0,
      topKeywords: [],
      mostUsedOccupation: 'other'
    }
  }
}
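For reference, a minimal sketch of how this service could be wired up from the main process. It assumes an already-initialised TypeORM DataSource named `dataSource` that registers the ReadingPersona entity, and a hypothetical helper `rebuildPersona`; it is illustrative only and not part of this commit.

// Illustrative usage sketch (assumptions: an initialised TypeORM DataSource
// named `dataSource`, and reflection data already loaded by the caller).
import { DataSource } from 'typeorm'
import { ReadingPersona } from '@main/db/entities/ReadingPersona'
import {
  IReadingReflectionTaskBatch,
  IReadingReflectionTaskItem
} from '@shared/types/IReadingReflectionTask'
import { PersonaService, entityToUserReadingPersona } from '@main/services/persona.service'

export async function rebuildPersona(
  dataSource: DataSource,
  items: IReadingReflectionTaskItem[],
  batches: IReadingReflectionTaskBatch[]
) {
  // Build the service on top of the ReadingPersona repository
  const service = new PersonaService(dataSource.getRepository(ReadingPersona))
  // Recompute and persist the persona row, then map it to the renderer-facing DTO
  const saved = await service.refreshPersona(items, batches)
  return entityToUserReadingPersona(saved)
}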