-
Notifications
You must be signed in to change notification settings - Fork 9
Expand file tree
/
Copy path index.js
More file actions
77 lines (57 loc) · 2.12 KB
/
index.js
File metadata and controls
77 lines (57 loc) · 2.12 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
'use strict';

// Load environment variables from .env before any config values are read.
require('dotenv').config();

const postgres = require('postgres'); // reserved for the RAG vector store (see isRAG branch in main)
const fetch = require('node-fetch');

// API credentials/endpoint; the fallbacks are non-functional placeholders
// for local development — real values come from the environment.
const OPENAI_API_KEY = process.env.OPENAI_API_KEY || "sk-xxxx";
const OPENAI_URL = process.env.OPENAI_URL || "https://api.baichuan-ai.com";

// Thin logging helper so call sites stay short.
const log = function() { console.log(...arguments) };

// Export container: handler functions are attached as properties of `_`.
const _ = module.exports;
// process.env["NODE_TLS_REJECT_UNAUTHORIZED"] = 0
/**
 * Main entry point: validates the request, builds a chat prompt from the
 * user's text, and forwards it to the LLM endpoint.
 *
 * @param {Object} params - request payload: { text, openid, lang = 'Chinese', isRAG = false }
 * @param {Object} context - platform-supplied invocation context (unused here)
 * @returns {Promise<Object>} { openid, errCode: 22001, choices } on bad input,
 *   or { openid, errCode: 22000, params, ...completion } on success.
 */
_.main = async function (params, context) {
  console.time("ALL_Timer");
  try {
    const text = params.text || '';
    const openid = params.openid || '';
    const lang = params.lang || 'Chinese';
    const isRAG = params.isRAG || false;
    // Reject requests missing the mandatory fields.
    if (openid === '' || text === '') {
      return { openid, errCode: 22001, 'choices': [{'text': '异常请求,text/openid为空'}] };
    }
    // Default conversation: system persona + the user's thought.
    const messages = [
      {"role": "system", "content": "You are a professional psychologist." },
      {"role": "user", "content": "I will provide you my thoughts. I want you to give me scientific suggestions that will make me feel better. Request a response in "+ lang +". My thought is: "+ text +"\n" }
    ];
    if (isRAG) {
      // TODO: initialize the Postgres vector store for retrieval-augmented generation.
    }
    // Send the assembled context to the LLM.
    const retData = await _.getGPTRes(messages);
    log("gpt return data: ", JSON.stringify(retData));
    // Map the completion payload onto the response structure.
    return { openid, errCode: 22000, params, ...retData };
  } finally {
    // Always stop the timer — the original skipped this on early return
    // and on errors, leaking the "ALL_Timer" label.
    console.timeEnd("ALL_Timer");
  }
}
// Request the LLM (chat-completions endpoint).
/**
 * Posts the message list to the OpenAI-compatible chat-completions API
 * and returns the parsed JSON body (including API error bodies, which
 * are passed through unchanged, as before).
 *
 * @param {Array<{role: string, content: string}>} messages - chat history to send
 * @param {Object} [options] - optional overrides (backward compatible)
 * @param {string} [options.model="Baichuan2-Turbo"] - model name, e.g. "gpt-3.5-turbo"
 * @param {number} [options.temperature=0.5]
 * @param {number} [options.maxTokens=2000]
 * @returns {Promise<Object>} parsed JSON response body
 */
_.getGPTRes = async function (messages, { model = "Baichuan2-Turbo", temperature = 0.5, maxTokens = 2000 } = {}) {
  log({messages})
  const chatResponse = await fetch(
    OPENAI_URL + "/v1/chat/completions",
    {
      method: "POST",
      headers: {
        "Authorization": `Bearer ${OPENAI_API_KEY}`,
        "Content-Type": "application/json"
      },
      body: JSON.stringify({
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": maxTokens,
      })
    }
  );
  // Surface HTTP-level failures in the log; the JSON error body is still
  // returned to the caller so existing behavior is preserved.
  if (!chatResponse.ok) {
    log("chat completion HTTP error:", chatResponse.status, chatResponse.statusText);
  }
  return await chatResponse.json();
}