Val Town Code Search

API Access

You can access search results via the JSON API by adding format=json to your query:

https://codesearch.val.run/?q=function&page=13&format=json
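
For example, a quick fetch from TypeScript (the response shape is not documented on this page, so the sketch just logs whatever JSON comes back):

// Fetch one page of search results as JSON.
const res = await fetch("https://codesearch.val.run/?q=function&page=1&format=json");
if (!res.ok) throw new Error(`search request failed: ${res.status}`);
const data = await res.json();
console.log(data);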

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=function

Returns an array of strings in the format "username" or "username/projectName".
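
A minimal TypeScript helper, assuming the endpoint returns a plain JSON array of those strings:

// Fetch typeahead suggestions for a partial query.
async function typeahead(query: string): Promise<string[]> {
  const res = await fetch(
    `https://codesearch.val.run/typeahead?q=${encodeURIComponent(query)}`,
  );
  if (!res.ok) throw new Error(`typeahead request failed: ${res.status}`);
  return await res.json() as string[];
}

console.log(await typeahead("function")); // e.g. ["username", "username/projectName"]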

Found 28,800 results for "function" (599 ms)

OpenTelemetryCollector/transformExport.ts (10 matches)

@synth · Updated 17 hours ago
24 * By convention, we only use string and number values. Complex values are serialized.
25 */
26export async function transformExport(
27 tracesData: IExportTraceServiceRequest,
28): Promise<Array<OtelSpan>> {
107 | Record<string, AttributeValuePrimitive>;
108
109async function mapAttributeValue(value: IAnyValue): Promise<AttributeValue> {
110 if (!value) {
111 return null;
137}
138
139async function mapAttributes(
140 attributes: IKeyValue[],
141): Promise<Record<string, AttributeValue>> {
158}
159
160async function mapEvent(event: IEvent) {
161 return {
162 name: event.name,
166}
167
168async function mapLink(link: ILink) {
169 return {
170 trace_id: stringOrUintToString(link.traceId),
175}
176
177function stringOrUintToString(id: string | Uint8Array) {
178 return id instanceof Uint8Array
179 ? Buffer.from(id).toString("hex")
181}
182
183function mapStatus(status: IStatus) {
184 return {
185 code: status.code,
188}
189
190function statusCodeToString(statusCode: EStatusCode) {
191 switch (statusCode) {
192 case 0:
201}
202
203// Function to convert ESpanKind to SpanKind
204function convertToSpanKind(spanKind: ESpanKind): string {
205 switch (spanKind) {
206 case 1:

OpenTelemetryCollector/queries.ts (1 match)

@synth · Updated 17 hours ago
8 * @TODO - Need to implement `end_time` column for this query to work
9 */
10export function findTraces(db: DBType) {
11 const REFERENCE_QUERY = `
12SELECT trace_id, MAX(end_time) as end_time

OpenTelemetryCollector/openapi.ts (1 match)

@synth · Updated 17 hours ago
3 * with a better experience
4 */
5export function getOpenAPISpec() {
6 return {
7 openapi: "3.0.0",

OpenTelemetryCollector/index.ts (2 matches)

@synth · Updated 17 hours ago
47
48/**
49 * Export a function that wraps the incoming request,
50 * then injects the Deno env vars into the Hono app before
51 * executing the api entrypoint (`app.fetch`)
52 */
53export default async function(req: Request): Promise<Response> {
54 const env = Deno.env.toObject();
55 // NOTE - Adding the entire env object will also expose the following values to your api handlers:
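
The match is cut off by the search view; a minimal sketch of how such a wrapper typically finishes, assuming `app` is the Hono instance defined elsewhere in that file (not shown here):

export default async function(req: Request): Promise<Response> {
  // Pass the Deno env vars to the Hono app as its environment bindings.
  const env = Deno.env.toObject();
  return app.fetch(req, env);
}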

OpenTelemetryCollector/auth.ts (1 match)

@synth · Updated 17 hours ago
36 * Token comparison that mitigates against timing based attacks
37 */
38function timingSafeTokenComparison(userToken: string, appToken: string) {
39 const userTokenBytes = new TextEncoder().encode(userToken);
40 const appTokenBytes = new TextEncoder().encode(appToken);
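
The match cuts off before the comparison itself; a hypothetical continuation (a sketch of the usual constant-time pattern, not the actual val) XOR-accumulates byte differences so the loop's runtime does not depend on where the tokens first differ:

function timingSafeTokenComparison(userToken: string, appToken: string): boolean {
  const userTokenBytes = new TextEncoder().encode(userToken);
  const appTokenBytes = new TextEncoder().encode(appToken);
  // Returning early here leaks only the token length, not its content.
  if (userTokenBytes.length !== appTokenBytes.length) return false;
  let diff = 0;
  for (let i = 0; i < appTokenBytes.length; i++) {
    diff |= userTokenBytes[i] ^ appTokenBytes[i];
  }
  return diff === 0;
}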

os/cleanup.tsx (3 matches)

@dinavinter · Updated 17 hours ago
12}
13
14async function makeGigyaRequest(endpoint, params) {
15 const url = `https://${GIGYA_DOMAIN}/${endpoint}?httpStatusCodes=true`;
16
36 return JSON.parse(text);
37}
38async function confirmCleanup() {
39 console.log("⚠️ WARNING: This script will delete ALL data from Gigya DS!");
40 console.log("This action cannot be undone.");
50}
51
52async function cleanupDSData() {
53 await confirmCleanup();
54

Contextual/blob.ts (5 matches)

@c15r · Updated 18 hours ago
136];
137
138// Helper function to create namespaced key
139function createNamespacedKey(key: string, userId?: string | null): string {
140 if (userId === null || userId === undefined) {
141 // Admin context - use global namespace with admin prefix
146}
147
148// Helper function to extract user keys from namespaced keys
149function extractUserKeys(keys: string[], userId?: string | null): string[] {
150 const namespace = userId === null || userId === undefined ? "admin:" : `user:${userId}:`;
151 console.log(`extractUserKeys() `, keys);
155}
156
157export async function handleBlobTool(toolName: string, params: any, authContext: AuthContext): Promise<any> {
158 const userId = authContext.isAdmin ? null : authContext.user?.id;
159

steel_puppeteer_starter/main.tsx (1 match)

@rishav · Updated 18 hours ago
12});
13
14async function main() {
15 let session;
16 let browser;

linkInBioTemplate/new_call.tsx (3 matches)

@hackthehackerman · Updated 19 hours ago
7)`);
8
9async function createOrUpdatePerson(phoneNumber: string, fullName: string, zipcode: string) {
10 // Use INSERT OR REPLACE to handle both create and update cases
11 await sqlite.execute({
22);
23
24async function getPersonByPhoneNumber(phoneNumber: string): Promise<{ fullName: string; zipcode: string } | null> {
25 const result = await sqlite.execute({
26 sql: `SELECT full_name, zipcode FROM customer WHERE phone_number = ?`,
68`;
69
70export default async function jsonOkExample(request: Request) {
71 if (request.method !== "POST") {
72 return Response.json(

os/data.json (2 matches)

@dinavinter · Updated 19 hours ago
50 },
51 {
52 "raw": "### Project Name\n\nDevstral_FT\n\n### Repository URL\n\nhttps://github.TBD/org-name/sap-tuned-devstral\n\n### Project Description\n\nProblem Statement/Motivation: 🎯\r\nCurrently, leveraging Large Language Models (LLMs) for code assistance, generation, and understanding often involves sending proprietary code to third-party services. This presents potential privacy and security concerns. Additionally, reliance on external services can introduce latency and lacks offline capabilities.\r\n\r\nOur services extensive codebase, contained within our Git repositories, represents a valuable dataset. Fine-tuning a capable open-source model like Devstral specifically on our code could provide a highly contextualized, fast, and private LLM assistant. This would empower developers with tailored code suggestions, explanations, and generation capabilities directly within their local development environment.\r\n\r\nGoals/Acceptance Criteria: ✅\r\n\r\nSuccessfully fine-tune a version of the Devstral model on a selected subset of our team's Git repositories.\r\nThe fine-tuned model can be run locally using Ollama on a typical developer machine with acceptable performance (e.g., response time for suggestions < 1-2 seconds).\r\nDevelopers can query the local model from VS Code (e.g., via a compatible extension like Continue or a similar tool) for:\r\nCode completion/suggestion relevant to our codebase.\r\nExplaining snippets of our proprietary code.\r\nGenerating boilerplate or utility functions based on our coding patterns.\r\nThe entire process maintains data privacy by keeping our codebase and the fine-tuned model strictly within our local/private infrastructure.\r\nDocumentation is created for setting up and using the local LLM.\r\n\r\n\r\nHigh-Level Tasks: 🛠️\r\n\r\n[ ] Research & Planning:\r\n[ ] Identify suitable Git repositories and branches for the training dataset.\r\n[ ] Research best practices for preparing code data for fine-tuning (e.g., filtering, formatting, anonymization if needed).\r\n[ ] Evaluate hardware requirements for fine-tuning and local inference.\r\n[ ] Investigate and select the most appropriate VS Code plugin(s) for Ollama integration.\r\n\r\n[ ] Data Preparation:\r\n[ ] Clone selected repositories.\r\n[ ] Implement scripts to clean, preprocess, and format the code data.\r\n\r\n[ ] Model Fine-Tuning:\r\n[ ] Set up the fine-tuning environment for Devstral.\r\n[ ] Perform initial fine-tuning runs and iterate on parameters.\r\n[ ] Evaluate the performance and quality of the fine-tuned model.\r\n\r\n[ ] Deployment & Integration:\r\n[ ] Package the fine-tuned model for Ollama.\r\n[ ] Develop instructions for team members to install Ollama and the custom model.\r\n[ ] Configure VS Code plugin(s) to connect to the local Ollama instance.\r\n\r\n[ ] Testing & Feedback:\r\n[ ] Conduct internal testing with the development team.\r\n[ ] Gather feedback and iterate on the model/setup.\r\n\r\n[ ] Documentation:\r\n[ ] Create user guides for setup and usage.\r\n[ ] Document the fine-tuning process for future reference.\r\n\r\n\r\nConsiderations/Potential Challenges: 🤔\r\nData Volume & Quality: Ensuring enough high-quality, representative code is used for effective fine-tuning.\r\nComputational Resources: Fine-tuning can be resource-intensive. 
We need to assess if local machines are sufficient or if a dedicated training server is needed.\r\nModel Performance: Achieving a good balance between model size, inference speed, and the quality of assistance.\r\nMaintenance: The model may need to be periodically retrained as the codebase evolves.\n\n### Potential Contribution Areas\n\n_No response_\n\n### Estimated Time Commitment\n\nMedium (4-8 hours/week)\n\n### Required Skills\n\n_No response_\n\n### License\n\n_No response_\n\n### Additional Information\n\n_No response_",
53 "id": 27,
54 "title": "🚀 Project: Fine-tune Devstral for Local, Private Code Assistance via Ollama & VS Code",
84 "projectName": "Devstral_FT",
85 "repositoryUrl": "https://github.TBD/org-name/sap-tuned-devstral",
86 "description": "Problem Statement/Motivation: 🎯\r\nCurrently, leveraging Large Language Models (LLMs) for code assistance, generation, and understanding often involves sending proprietary code to third-party services. This presents potential privacy and security concerns. Additionally, reliance on external services can introduce latency and lacks offline capabilities.\r\n\r\nOur services extensive codebase, contained within our Git repositories, represents a valuable dataset. Fine-tuning a capable open-source model like Devstral specifically on our code could provide a highly contextualized, fast, and private LLM assistant. This would empower developers with tailored code suggestions, explanations, and generation capabilities directly within their local development environment.\r\n\r\nGoals/Acceptance Criteria: ✅\r\n\r\nSuccessfully fine-tune a version of the Devstral model on a selected subset of our team's Git repositories.\r\nThe fine-tuned model can be run locally using Ollama on a typical developer machine with acceptable performance (e.g., response time for suggestions < 1-2 seconds).\r\nDevelopers can query the local model from VS Code (e.g., via a compatible extension like Continue or a similar tool) for:\r\nCode completion/suggestion relevant to our codebase.\r\nExplaining snippets of our proprietary code.\r\nGenerating boilerplate or utility functions based on our coding patterns.\r\nThe entire process maintains data privacy by keeping our codebase and the fine-tuned model strictly within our local/private infrastructure.\r\nDocumentation is created for setting up and using the local LLM.\r\n\r\n\r\nHigh-Level Tasks: 🛠️\r\n\r\n[ ] Research & Planning:\r\n[ ] Identify suitable Git repositories and branches for the training dataset.\r\n[ ] Research best practices for preparing code data for fine-tuning (e.g., filtering, formatting, anonymization if needed).\r\n[ ] Evaluate hardware requirements for fine-tuning and local inference.\r\n[ ] Investigate and select the most appropriate VS Code plugin(s) for Ollama integration.\r\n\r\n[ ] Data Preparation:\r\n[ ] Clone selected repositories.\r\n[ ] Implement scripts to clean, preprocess, and format the code data.\r\n\r\n[ ] Model Fine-Tuning:\r\n[ ] Set up the fine-tuning environment for Devstral.\r\n[ ] Perform initial fine-tuning runs and iterate on parameters.\r\n[ ] Evaluate the performance and quality of the fine-tuned model.\r\n\r\n[ ] Deployment & Integration:\r\n[ ] Package the fine-tuned model for Ollama.\r\n[ ] Develop instructions for team members to install Ollama and the custom model.\r\n[ ] Configure VS Code plugin(s) to connect to the local Ollama instance.\r\n\r\n[ ] Testing & Feedback:\r\n[ ] Conduct internal testing with the development team.\r\n[ ] Gather feedback and iterate on the model/setup.\r\n\r\n[ ] Documentation:\r\n[ ] Create user guides for setup and usage.\r\n[ ] Document the fine-tuning process for future reference.\r\n\r\n\r\nConsiderations/Potential Challenges: 🤔\r\nData Volume & Quality: Ensuring enough high-quality, representative code is used for effective fine-tuning.\r\nComputational Resources: Fine-tuning can be resource-intensive. We need to assess if local machines are sufficient or if a dedicated training server is needed.\r\nModel Performance: Achieving a good balance between model size, inference speed, and the quality of assistance.\r\nMaintenance: The model may need to be periodically retrained as the codebase evolves.",
87 "contribution": "_No response_",
88 "timeCommitment": "Medium (4-8 hours/week)",

getFileEmail (4 file matches)

@shouser · Updated 1 month ago
A helper function to build a file's email

tuna (8 file matches)

@jxnblk · Updated 1 month ago
Simple functional CSS library for Val Town
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",
webup
LangChain (https://langchain.com) Ambassador, KubeSphere (https://kubesphere.io) Ambassador, CNCF OpenFunction (https://openfunction.dev) TOC Member.