 * By convention, we only use string and number values. Complex values are serialized.
 */
export async function transformExport(
  tracesData: IExportTraceServiceRequest,
): Promise<Array<OtelSpan>> {
  // ... (span mapping logic elided in this excerpt)
}

type AttributeValue =
  // ... (other union members elided)
  | Record<string, AttributeValuePrimitive>;

async function mapAttributeValue(value: IAnyValue): Promise<AttributeValue> {
  if (!value) {
    return null;
  }
  // ... (handling of the remaining value variants elided)
}

async function mapAttributes(
  attributes: IKeyValue[],
): Promise<Record<string, AttributeValue>> {
  // ... (body elided)
}
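// Hedged sketch (added; the original body is elided above): mapAttributes presumably
// folds the OTLP key/value pairs into a plain record, delegating each value to
// mapAttributeValue. This assumes IKeyValue is shaped as { key, value }.
async function mapAttributesSketch(
  attributes: IKeyValue[],
): Promise<Record<string, AttributeValue>> {
  const result: Record<string, AttributeValue> = {};
  for (const { key, value } of attributes) {
    result[key] = await mapAttributeValue(value);
  }
  return result;
}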
async function mapEvent(event: IEvent) {
  return {
    name: event.name,
    // ... (remaining fields elided)
  };
}

async function mapLink(link: ILink) {
  return {
    trace_id: stringOrUintToString(link.traceId),
    // ... (remaining fields elided)
  };
}

function stringOrUintToString(id: string | Uint8Array) {
  return id instanceof Uint8Array
    ? Buffer.from(id).toString("hex")
    : id;
}
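// Illustrative usage (added, not from the original module): OTLP/protobuf payloads
// deliver trace and span IDs as raw bytes, while OTLP/JSON payloads deliver them as
// strings; the helper above normalizes both to the same hex form.
function demoStringOrUintToString(): void {
  console.log(stringOrUintToString(new Uint8Array([0xde, 0xad, 0xbe, 0xef]))); // "deadbeef"
  console.log(stringOrUintToString("deadbeef")); // "deadbeef"
}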
function mapStatus(status: IStatus) {
  return {
    code: status.code,
    // ... (remaining fields elided)
  };
}

function statusCodeToString(statusCode: EStatusCode) {
  switch (statusCode) {
    case 0:
      // ... (remaining cases elided)
  }
}
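// Hedged sketch (assumption, not the original implementation): the OTLP status enum
// defines 0 = UNSET, 1 = OK, 2 = ERROR, so the elided cases above presumably map the
// numeric code to a readable label. The exact label strings below are illustrative.
function statusCodeToStringSketch(statusCode: EStatusCode): string {
  switch (statusCode) {
    case 1:
      return "STATUS_CODE_OK";
    case 2:
      return "STATUS_CODE_ERROR";
    case 0:
    default:
      return "STATUS_CODE_UNSET";
  }
}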
// Convert an OTLP ESpanKind enum value to a readable span-kind string
function convertToSpanKind(spanKind: ESpanKind): string {
  switch (spanKind) {
    case 1:
      // ... (fragment truncated in this excerpt)
  }
}
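// Hedged sketch (assumption): the OTLP ESpanKind enum defines 0 = UNSPECIFIED,
// 1 = INTERNAL, 2 = SERVER, 3 = CLIENT, 4 = PRODUCER, 5 = CONSUMER, so the truncated
// switch above likely continues along these lines. The label strings are illustrative.
function convertToSpanKindSketch(spanKind: ESpanKind): string {
  switch (spanKind) {
    case 1:
      return "Internal";
    case 2:
      return "Server";
    case 3:
      return "Client";
    case 4:
      return "Producer";
    case 5:
      return "Consumer";
    default:
      return "Unspecified";
  }
}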
/**
 * Export a function that wraps the incoming request,
 * then injects the Deno env vars into the Hono app before
 * executing the API entrypoint (`app.fetch`).
 */
export default async function (req: Request): Promise<Response> {
  const env = Deno.env.toObject();
  // NOTE - Adding the entire env object will also expose the following values to your API handlers:
  // ... (fragment truncated)
}
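// Hedged sketch (assumption, not the original file): per the doc comment above, the
// wrapper presumably ends by forwarding the request plus the env object to the Hono
// app's fetch entrypoint, which exposes those values to handlers as c.env. The import
// specifier and the route below are illustrative.
import { Hono } from "npm:hono";

const appSketch = new Hono<{ Bindings: Record<string, string> }>();
appSketch.get("/whoami", (c) => c.text(c.env.USER ?? "unknown"));

async function handleWithEnvSketch(req: Request): Promise<Response> {
  const env = Deno.env.toObject();
  // The second argument to fetch() supplies the env bindings to the app.
  return appSketch.fetch(req, env);
}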
}

async function makeGigyaRequest(endpoint, params) {
  const url = `https://${GIGYA_DOMAIN}/${endpoint}?httpStatusCodes=true`;
  // ... (request construction and error handling elided)
  return JSON.parse(text);
}
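// Hedged sketch (assumption, not the original body): Gigya REST endpoints accept
// form-encoded parameters and return JSON, so a minimal version of the request helper
// might look like this. The credentials argument is a hypothetical stand-in for
// whatever credential variables the original script uses.
async function makeGigyaRequestSketch(
  endpoint: string,
  params: Record<string, string>,
  credentials: { apiKey: string; userKey: string; secret: string },
) {
  const url = `https://${GIGYA_DOMAIN}/${endpoint}?httpStatusCodes=true`;
  const body = new URLSearchParams({
    apiKey: credentials.apiKey,
    userKey: credentials.userKey,
    secret: credentials.secret,
    ...params,
  });
  const response = await fetch(url, { method: "POST", body });
  const text = await response.text();
  // Gigya reports failures via an errorCode field in the JSON payload as well as the HTTP status.
  return JSON.parse(text);
}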
async function confirmCleanup() {
  console.log("⚠️ WARNING: This script will delete ALL data from Gigya DS!");
  console.log("This action cannot be undone.");
  // ... (interactive confirmation elided)
}
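// Hedged sketch (assumption): the elided part of confirmCleanup presumably asks for an
// explicit confirmation before proceeding. Assuming a runtime with the global prompt()
// (e.g. Deno), a minimal version could be:
async function confirmCleanupSketch(): Promise<void> {
  console.log("⚠️ WARNING: This script will delete ALL data from Gigya DS!");
  console.log("This action cannot be undone.");
  const answer = prompt("Type 'DELETE' to continue:");
  if (answer !== "DELETE") {
    throw new Error("Cleanup aborted by user.");
  }
}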
async function cleanupDSData() {
  await confirmCleanup();
  // ... (rest of the cleanup logic truncated in this excerpt)
}
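// Hedged sketch (assumption): the elided cleanup presumably searches each DS object type
// and deletes the returned records via the request helper. The endpoint names, query
// syntax, and field names are assumptions about the Gigya DS REST API, and the
// credentials argument matches the hypothetical helper sketched above.
async function cleanupTypeSketch(
  type: string,
  credentials: { apiKey: string; userKey: string; secret: string },
) {
  const searchResult = await makeGigyaRequestSketch("ds.search", {
    query: `SELECT * FROM ${type}`,
  }, credentials);
  for (const record of searchResult.results ?? []) {
    await makeGigyaRequestSketch("ds.delete", { oid: record.oid, type }, credentials);
  }
}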
// ... (earlier definitions elided; an array literal closes here)
];

// Helper function to create a namespaced key
function createNamespacedKey(key: string, userId?: string | null): string {
  if (userId === null || userId === undefined) {
    // Admin context - use global namespace with admin prefix
    // ...
  }
  // ... (non-admin branch elided)
}
// Helper function to extract user keys from namespaced keys
function extractUserKeys(keys: string[], userId?: string | null): string[] {
  const namespace =
    userId === null || userId === undefined ? "admin:" : `user:${userId}:`;
  console.log(`extractUserKeys() `, keys);
  // ... (prefix filtering elided)
}
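// Hedged reconstruction (assumption, not the original bodies): given the "admin:" /
// `user:${userId}:` namespace format visible above, the two helpers presumably behave
// roughly like this. Names with a "Sketch" suffix are illustrative only.
function createNamespacedKeySketch(key: string, userId?: string | null): string {
  const namespace =
    userId === null || userId === undefined ? "admin:" : `user:${userId}:`;
  return `${namespace}${key}`;
}

function extractUserKeysSketch(keys: string[], userId?: string | null): string[] {
  const namespace =
    userId === null || userId === undefined ? "admin:" : `user:${userId}:`;
  // Keep only keys in this caller's namespace and strip the prefix.
  return keys
    .filter((key) => key.startsWith(namespace))
    .map((key) => key.slice(namespace.length));
}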
export async function handleBlobTool(
  toolName: string,
  params: any,
  authContext: AuthContext,
): Promise<any> {
  const userId = authContext.isAdmin ? null : authContext.user?.id;
  // ... (fragment truncated)
}
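// Hedged sketch (assumption): a handler like this typically derives the caller's key
// namespace once and then dispatches on the tool name. The tool names and the in-memory
// store below are illustrative stand-ins, not the original storage backend.
const blobStoreSketch = new Map<string, unknown>();

async function handleBlobToolSketch(
  toolName: string,
  params: { key: string; value?: unknown },
  userId?: string | null,
): Promise<unknown> {
  const namespacedKey = createNamespacedKeySketch(params.key, userId);
  switch (toolName) {
    case "blob_set":
      blobStoreSketch.set(namespacedKey, params.value);
      return { ok: true };
    case "blob_get":
      return blobStoreSketch.get(namespacedKey) ?? null;
    default:
      throw new Error(`Unknown blob tool: ${toolName}`);
  }
}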
// ... (table definition elided)
)`);

async function createOrUpdatePerson(phoneNumber: string, fullName: string, zipcode: string) {
  // Use INSERT OR REPLACE to handle both create and update cases
  await sqlite.execute({
    // ... (SQL statement and bound args elided)
  });
}

async function getPersonByPhoneNumber(
  phoneNumber: string,
): Promise<{ fullName: string; zipcode: string } | null> {
  const result = await sqlite.execute({
    sql: `SELECT full_name, zipcode FROM customer WHERE phone_number = ?`,
    // ... (bound args elided)
  });
  // ... (row extraction and return elided)
}
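// Hedged sketch (assumption): with an execute({ sql, args }) client like the one used
// above, the two helpers presumably look roughly like this. The args field and the
// result shape (result.rows as positional arrays) are assumptions about the client.
async function createOrUpdatePersonSketch(phoneNumber: string, fullName: string, zipcode: string) {
  await sqlite.execute({
    sql: `INSERT OR REPLACE INTO customer (phone_number, full_name, zipcode) VALUES (?, ?, ?)`,
    args: [phoneNumber, fullName, zipcode],
  });
}

async function getPersonByPhoneNumberSketch(
  phoneNumber: string,
): Promise<{ fullName: string; zipcode: string } | null> {
  const result = await sqlite.execute({
    sql: `SELECT full_name, zipcode FROM customer WHERE phone_number = ?`,
    args: [phoneNumber],
  });
  const row = result.rows?.[0];
  if (!row) return null;
  return { fullName: String(row[0]), zipcode: String(row[1]) };
}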
// ... (earlier template literal closes here)
`;

export default async function jsonOkExample(request: Request) {
  if (request.method !== "POST") {
    return Response.json(
      // ... (error payload and status elided)
    );
  }
  // ... (fragment truncated)
}
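// Hedged sketch (assumption): a minimal, complete version of a handler like this
// usually rejects non-POST requests with a JSON error and otherwise echoes a JSON
// payload. The status codes and response shapes below are illustrative.
async function jsonOkExampleSketch(request: Request): Promise<Response> {
  if (request.method !== "POST") {
    return Response.json({ error: "Method not allowed" }, { status: 405 });
  }
  const body = await request.json();
  return Response.json({ ok: true, received: body });
}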
  },
  {
52 "raw": "### Project Name\n\nDevstral_FT\n\n### Repository URL\n\nhttps://github.TBD/org-name/sap-tuned-devstral\n\n### Project Description\n\nProblem Statement/Motivation: 🎯\r\nCurrently, leveraging Large Language Models (LLMs) for code assistance, generation, and understanding often involves sending proprietary code to third-party services. This presents potential privacy and security concerns. Additionally, reliance on external services can introduce latency and lacks offline capabilities.\r\n\r\nOur services extensive codebase, contained within our Git repositories, represents a valuable dataset. Fine-tuning a capable open-source model like Devstral specifically on our code could provide a highly contextualized, fast, and private LLM assistant. This would empower developers with tailored code suggestions, explanations, and generation capabilities directly within their local development environment.\r\n\r\nGoals/Acceptance Criteria: ✅\r\n\r\nSuccessfully fine-tune a version of the Devstral model on a selected subset of our team's Git repositories.\r\nThe fine-tuned model can be run locally using Ollama on a typical developer machine with acceptable performance (e.g., response time for suggestions < 1-2 seconds).\r\nDevelopers can query the local model from VS Code (e.g., via a compatible extension like Continue or a similar tool) for:\r\nCode completion/suggestion relevant to our codebase.\r\nExplaining snippets of our proprietary code.\r\nGenerating boilerplate or utility functions based on our coding patterns.\r\nThe entire process maintains data privacy by keeping our codebase and the fine-tuned model strictly within our local/private infrastructure.\r\nDocumentation is created for setting up and using the local LLM.\r\n\r\n\r\nHigh-Level Tasks: 🛠️\r\n\r\n[ ] Research & Planning:\r\n[ ] Identify suitable Git repositories and branches for the training dataset.\r\n[ ] Research best practices for preparing code data for fine-tuning (e.g., filtering, formatting, anonymization if needed).\r\n[ ] Evaluate hardware requirements for fine-tuning and local inference.\r\n[ ] Investigate and select the most appropriate VS Code plugin(s) for Ollama integration.\r\n\r\n[ ] Data Preparation:\r\n[ ] Clone selected repositories.\r\n[ ] Implement scripts to clean, preprocess, and format the code data.\r\n\r\n[ ] Model Fine-Tuning:\r\n[ ] Set up the fine-tuning environment for Devstral.\r\n[ ] Perform initial fine-tuning runs and iterate on parameters.\r\n[ ] Evaluate the performance and quality of the fine-tuned model.\r\n\r\n[ ] Deployment & Integration:\r\n[ ] Package the fine-tuned model for Ollama.\r\n[ ] Develop instructions for team members to install Ollama and the custom model.\r\n[ ] Configure VS Code plugin(s) to connect to the local Ollama instance.\r\n\r\n[ ] Testing & Feedback:\r\n[ ] Conduct internal testing with the development team.\r\n[ ] Gather feedback and iterate on the model/setup.\r\n\r\n[ ] Documentation:\r\n[ ] Create user guides for setup and usage.\r\n[ ] Document the fine-tuning process for future reference.\r\n\r\n\r\nConsiderations/Potential Challenges: 🤔\r\nData Volume & Quality: Ensuring enough high-quality, representative code is used for effective fine-tuning.\r\nComputational Resources: Fine-tuning can be resource-intensive. 
We need to assess if local machines are sufficient or if a dedicated training server is needed.\r\nModel Performance: Achieving a good balance between model size, inference speed, and the quality of assistance.\r\nMaintenance: The model may need to be periodically retrained as the codebase evolves.\n\n### Potential Contribution Areas\n\n_No response_\n\n### Estimated Time Commitment\n\nMedium (4-8 hours/week)\n\n### Required Skills\n\n_No response_\n\n### License\n\n_No response_\n\n### Additional Information\n\n_No response_",
53 "id": 27,
54 "title": "🚀 Project: Fine-tune Devstral for Local, Private Code Assistance via Ollama & VS Code",
84 "projectName": "Devstral_FT",
85 "repositoryUrl": "https://github.TBD/org-name/sap-tuned-devstral",
86 "description": "Problem Statement/Motivation: 🎯\r\nCurrently, leveraging Large Language Models (LLMs) for code assistance, generation, and understanding often involves sending proprietary code to third-party services. This presents potential privacy and security concerns. Additionally, reliance on external services can introduce latency and lacks offline capabilities.\r\n\r\nOur services extensive codebase, contained within our Git repositories, represents a valuable dataset. Fine-tuning a capable open-source model like Devstral specifically on our code could provide a highly contextualized, fast, and private LLM assistant. This would empower developers with tailored code suggestions, explanations, and generation capabilities directly within their local development environment.\r\n\r\nGoals/Acceptance Criteria: ✅\r\n\r\nSuccessfully fine-tune a version of the Devstral model on a selected subset of our team's Git repositories.\r\nThe fine-tuned model can be run locally using Ollama on a typical developer machine with acceptable performance (e.g., response time for suggestions < 1-2 seconds).\r\nDevelopers can query the local model from VS Code (e.g., via a compatible extension like Continue or a similar tool) for:\r\nCode completion/suggestion relevant to our codebase.\r\nExplaining snippets of our proprietary code.\r\nGenerating boilerplate or utility functions based on our coding patterns.\r\nThe entire process maintains data privacy by keeping our codebase and the fine-tuned model strictly within our local/private infrastructure.\r\nDocumentation is created for setting up and using the local LLM.\r\n\r\n\r\nHigh-Level Tasks: 🛠️\r\n\r\n[ ] Research & Planning:\r\n[ ] Identify suitable Git repositories and branches for the training dataset.\r\n[ ] Research best practices for preparing code data for fine-tuning (e.g., filtering, formatting, anonymization if needed).\r\n[ ] Evaluate hardware requirements for fine-tuning and local inference.\r\n[ ] Investigate and select the most appropriate VS Code plugin(s) for Ollama integration.\r\n\r\n[ ] Data Preparation:\r\n[ ] Clone selected repositories.\r\n[ ] Implement scripts to clean, preprocess, and format the code data.\r\n\r\n[ ] Model Fine-Tuning:\r\n[ ] Set up the fine-tuning environment for Devstral.\r\n[ ] Perform initial fine-tuning runs and iterate on parameters.\r\n[ ] Evaluate the performance and quality of the fine-tuned model.\r\n\r\n[ ] Deployment & Integration:\r\n[ ] Package the fine-tuned model for Ollama.\r\n[ ] Develop instructions for team members to install Ollama and the custom model.\r\n[ ] Configure VS Code plugin(s) to connect to the local Ollama instance.\r\n\r\n[ ] Testing & Feedback:\r\n[ ] Conduct internal testing with the development team.\r\n[ ] Gather feedback and iterate on the model/setup.\r\n\r\n[ ] Documentation:\r\n[ ] Create user guides for setup and usage.\r\n[ ] Document the fine-tuning process for future reference.\r\n\r\n\r\nConsiderations/Potential Challenges: 🤔\r\nData Volume & Quality: Ensuring enough high-quality, representative code is used for effective fine-tuning.\r\nComputational Resources: Fine-tuning can be resource-intensive. We need to assess if local machines are sufficient or if a dedicated training server is needed.\r\nModel Performance: Achieving a good balance between model size, inference speed, and the quality of assistance.\r\nMaintenance: The model may need to be periodically retrained as the codebase evolves.",
87 "contribution": "_No response_",
88 "timeCommitment": "Medium (4-8 hours/week)",