SmolLLM

LLM utility library

目前為 2025-03-04 提交的版本(可檢視最新版本)。

此腳本不應該直接安裝,它是一個供其他腳本使用的函式庫。欲使用本函式庫,請在腳本 metadata 寫上: // @require https://update.gf.qytechs.cn/scripts/528704/1546687/SmolLLM.js

  1. // ==UserScript==
  2. // @name SmolLLM
  3. // @namespace http://tampermonkey.net/
  4. // @version 0.1.4
  5. // @description LLM utility library
  6. // @author RoCry
  7. // @grant GM_xmlhttpRequest
  8. // @require https://update.gf.qytechs.cn/scripts/528703/1546610/SimpleBalancer.js
  9. // @license MIT
  10. // ==/UserScript==
  11.  
  12. class SmolLLM {
  13. constructor() {
  14. // Ensure SimpleBalancer is available
  15. if (typeof SimpleBalancer === 'undefined') {
  16. throw new Error('SimpleBalancer is required for SmolLLM to work');
  17. }
  18.  
  19. // Verify GM_xmlhttpRequest is available
  20. if (typeof GM_xmlhttpRequest === 'undefined') {
  21. throw new Error('GM_xmlhttpRequest is required for SmolLLM to work');
  22. }
  23.  
  24. this.balancer = new SimpleBalancer();
  25. this.logger = console;
  26. }
  27.  
  28. /**
  29. * Prepares request data based on the provider
  30. *
  31. * @param {string} prompt - User prompt
  32. * @param {string} systemPrompt - System prompt
  33. * @param {string} modelName - Model name
  34. * @param {string} providerName - Provider name (anthropic, openai, gemini)
  35. * @param {string} baseUrl - API base URL
  36. * @returns {Object} - {url, data} for the request
  37. */
  38. prepareRequestData(prompt, systemPrompt, modelName, providerName, baseUrl) {
  39. baseUrl = baseUrl.trim().replace(/\/+$/, '');
  40.  
  41. let url, data;
  42.  
  43. if (providerName === 'anthropic') {
  44. url = `${baseUrl}/v1/messages`;
  45. data = {
  46. model: modelName,
  47. max_tokens: 4096,
  48. messages: [{ role: 'user', content: prompt }],
  49. stream: true
  50. };
  51. if (systemPrompt) {
  52. data.system = systemPrompt;
  53. }
  54. } else if (providerName === 'gemini') {
  55. url = `${baseUrl}/v1beta/models/${modelName}:streamGenerateContent?alt=sse`;
  56. data = {
  57. contents: [{ parts: [{ text: prompt }] }]
  58. };
  59. if (systemPrompt) {
  60. data.system_instruction = { parts: [{ text: systemPrompt }] };
  61. }
  62. } else {
  63. // OpenAI compatible APIs
  64. const messages = [];
  65. if (systemPrompt) {
  66. messages.push({ role: 'system', content: systemPrompt });
  67. }
  68. messages.push({ role: 'user', content: prompt });
  69.  
  70. data = {
  71. messages: messages,
  72. model: modelName,
  73. stream: true
  74. };
  75.  
  76. // Handle URL based on suffix
  77. if (baseUrl.endsWith('#')) {
  78. url = baseUrl.slice(0, -1); // Remove the # and use exact URL
  79. } else if (baseUrl.endsWith('/')) {
  80. url = `${baseUrl}chat/completions`; // Skip v1 prefix
  81. } else {
  82. url = `${baseUrl}/v1/chat/completions`; // Default pattern
  83. }
  84. }
  85.  
  86. return { url, data };
  87. }
  88.  
  89. /**
  90. * Prepares headers for authentication based on the provider
  91. *
  92. * @param {string} providerName - Provider name
  93. * @param {string} apiKey - API key
  94. * @returns {Object} - Request headers
  95. */
  96. prepareHeaders(providerName, apiKey) {
  97. const headers = {
  98. 'Content-Type': 'application/json'
  99. };
  100.  
  101. if (providerName === 'anthropic') {
  102. headers['X-API-Key'] = apiKey;
  103. headers['Anthropic-Version'] = '2023-06-01';
  104. } else if (providerName === 'gemini') {
  105. headers['X-Goog-Api-Key'] = apiKey;
  106. } else {
  107. headers['Authorization'] = `Bearer ${apiKey}`;
  108. }
  109.  
  110. return headers;
  111. }
  112.  
  113. /**
  114. * Process SSE stream data for different providers
  115. *
  116. * @param {string} chunk - Data chunk from SSE
  117. * @param {string} providerName - Provider name
  118. * @returns {string|null} - Extracted text content or null
  119. */
  120. processStreamChunk(chunk, providerName) {
  121. if (!chunk || chunk === '[DONE]') return null;
  122.  
  123. try {
  124. console.log(`Processing chunk for ${providerName}:`, chunk.substring(0, 100) + (chunk.length > 100 ? '...' : ''));
  125. const data = JSON.parse(chunk);
  126.  
  127. if (providerName === 'anthropic') {
  128. if (data.type === 'content_block_delta' && data.delta && data.delta.text) {
  129. return data.delta.text;
  130. } else {
  131. console.log('Anthropic data structure:', JSON.stringify(data).substring(0, 200));
  132. }
  133. } else if (providerName === 'gemini') {
  134. if (data.candidates &&
  135. data.candidates[0].content &&
  136. data.candidates[0].content.parts &&
  137. data.candidates[0].content.parts[0].text) {
  138. return data.candidates[0].content.parts[0].text;
  139. } else {
  140. console.log('Gemini data structure:', JSON.stringify(data).substring(0, 200));
  141. }
  142. } else {
  143. // OpenAI compatible
  144. if (data.choices &&
  145. data.choices[0].delta &&
  146. data.choices[0].delta.content) {
  147. return data.choices[0].delta.content;
  148. } else {
  149. console.log('OpenAI data structure:', JSON.stringify(data).substring(0, 200));
  150. }
  151. }
  152. } catch (e) {
  153. // Ignore parsing errors in stream chunks
  154. console.error(`Error parsing chunk: ${e.message}, chunk: ${chunk}`);
  155. return null;
  156. }
  157.  
  158. return null;
  159. }
  160.  
  161. /**
  162. * Makes a request to the LLM API and handles streaming responses
  163. *
  164. * @param {Object} params - Request parameters
  165. * @returns {Promise<string>} - Full response text
  166. */
  167. async askLLM({
  168. prompt,
  169. providerName,
  170. systemPrompt = '',
  171. model,
  172. apiKey,
  173. baseUrl,
  174. handler = null,
  175. timeout = 60000
  176. }) {
  177. if (!prompt || !providerName || !model || !apiKey || !baseUrl) {
  178. throw new Error('Required parameters missing');
  179. }
  180.  
  181. // Use balancer to choose API key and base URL pair
  182. [apiKey, baseUrl] = this.balancer.choosePair(apiKey, baseUrl);
  183.  
  184. const { url, data } = this.prepareRequestData(
  185. prompt, systemPrompt, model, providerName, baseUrl
  186. );
  187.  
  188. const headers = this.prepareHeaders(providerName, apiKey);
  189.  
  190. // Log request info (with masked API key)
  191. const apiKeyPreview = `${apiKey.slice(0, 5)}...${apiKey.slice(-4)}`;
  192. this.logger.info(
  193. `Sending ${url} model=${model} api_key=${apiKeyPreview}, len=${prompt.length}`
  194. );
  195.  
  196. return new Promise((resolve, reject) => {
  197. let responseText = '';
  198. let buffer = '';
  199. let timeoutId;
  200.  
  201. // Set timeout
  202. if (timeout) {
  203. timeoutId = setTimeout(() => {
  204. reject(new Error(`Request timed out after ${timeout}ms`));
  205. }, timeout);
  206. }
  207.  
  208. GM_xmlhttpRequest({
  209. method: 'POST',
  210. url: url,
  211. headers: headers,
  212. data: JSON.stringify(data),
  213. responseType: 'stream',
  214. onload: (response) => {
  215. // This won't be called for streaming responses
  216. if (response.status !== 200) {
  217. clearTimeout(timeoutId);
  218. reject(new Error(`API request failed: ${response.status} - ${response.responseText}`));
  219. } else {
  220. console.log(`API request success: ${response.status} - ${response.responseText}`);
  221. }
  222. },
  223. onreadystatechange: (state) => {
  224. if (state.readyState === 4) {
  225. // Request completed
  226. clearTimeout(timeoutId);
  227. resolve(responseText);
  228. } else {
  229. console.log(`API request state: ${state.readyState}`);
  230. }
  231. },
  232. onprogress: (response) => {
  233. // Handle streaming response
  234. const chunk = response.responseText.substring(buffer.length);
  235. buffer = response.responseText;
  236.  
  237. console.log(`API request progress: ${chunk}`);
  238.  
  239. if (!chunk) return;
  240.  
  241. // Process SSE format (data: {...}\n\n)
  242. const lines = chunk.split('\n\n');
  243.  
  244. for (const line of lines) {
  245. if (!line.trim() || !line.startsWith('data: ')) continue;
  246.  
  247. const content = line.slice(6); // Remove 'data: ' prefix
  248. const textChunk = this.processStreamChunk(content, providerName);
  249.  
  250. if (textChunk) {
  251. responseText += textChunk;
  252. if (handler && typeof handler === 'function') {
  253. handler(textChunk);
  254. }
  255. }
  256. }
  257. },
  258. onerror: (error) => {
  259. clearTimeout(timeoutId);
  260. reject(new Error(`Request failed: ${error.error || error}`));
  261. },
  262. ontimeout: () => {
  263. clearTimeout(timeoutId);
  264. reject(new Error(`Request timed out after ${timeout}ms`));
  265. }
  266. });
  267. });
  268. }
  269. }
  270.  
  271. // Make it available globally
  272. window.SmolLLM = SmolLLM;
  273.  
  274. // Export for module systems if needed
  275. if (typeof module !== 'undefined') {
  276. module.exports = SmolLLM;
  277. }

QingJ © 2025

镜像随时可能失效,请加Q群300939539或关注我们的公众号极客氢云获取最新地址