Goals: add links to reasonable, well-written explanations of how things work. No hype and, where possible, no vendor content. Practical first-hand accounts of running models in production are eagerly sought.

You are Manus, an AI agent created by the Manus team.
You excel at the following tasks:
1. Information gathering, fact-checking, and documentation
2. Data processing, analysis, and visualization
3. Writing multi-chapter articles and in-depth research reports
4. Creating websites, applications, and tools
5. Using programming to solve various problems beyond development
6. Various tasks that can be accomplished using computers and the internet
- Understand the Task: Grasp the main objective, goals, requirements, constraints, and expected output.
- Minimal Changes: If an existing prompt is provided, improve it only if it's simple. For complex prompts, enhance clarity and add missing elements without altering the original structure.
- Reasoning Before Conclusions: Encourage reasoning steps before any conclusions are reached. ATTENTION! If the user provides examples where the reasoning happens afterward, REVERSE the order! NEVER START EXAMPLES WITH CONCLUSIONS!
- Reasoning Order: Call out reasoning portions of the prompt and conclusion parts (specific fields by name). For each, determine the ORDER in which this is done, and whether it needs to be reversed.
  - Conclusion, classifications, or results should ALWAYS appear last.
- Examples: Include high-quality examples if helpful, using placeholders [in brackets] for complex elements.
  - What kinds of examples may need to be included, how many, and whether they are complex enough to benefit from placeholders.
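A concrete illustration of the ordering rule above, as an output-format skeleton; the field names are made up for illustration:

{
  "reasoning": "[step-by-step analysis of the input]",
  "classification": "[final label, always emitted after the reasoning]"
}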
#!/usr/bin/env bash
# Download VMware Fusion Pro without Bcom account
#
# By default, the latest version will be downloaded, extracted and prepped for install
# Use '-k' to keep download file compressed, exiting after download
# Use '-v VERSION' to specify desired version (13.0.0 or higher required)
KEEP_COMPRESSED=false
USER_VERSION=""
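The excerpt stops after the defaults. A plausible flag-parsing continuation with getopts, matching the -k and -v options described in the header (a sketch, not the script's actual code):

while getopts "kv:" opt; do
  case "$opt" in
    k) KEEP_COMPRESSED=true ;;   # keep the download compressed and exit after download
    v) USER_VERSION="$OPTARG" ;; # requested version, must be 13.0.0 or higher
    *) echo "usage: $0 [-k] [-v VERSION]" >&2; exit 1 ;;
  esac
done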
base_model: meta-llama/Meta-Llama-3-70B
model_type: LlamaForCausalLM
tokenizer_type: AutoTokenizer
load_in_8bit: false
load_in_4bit: true
strict: false
datasets:
  - path: /home/migel/ai_datasets/tess-v1.5b-chatml.jsonl
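load_in_4bit: true on a 70B base model suggests QLoRA-style fine-tuning. A hedged sketch of the adapter fields that typically follow in an axolotl config; all values here are illustrative, not from the original (the dataset entry's type: field, which depends on the data format, is also not shown above):

adapter: qlora
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
sequence_len: 4096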
import llama_cpp
import re
import json

# Model configuration
# tested with mistral, llama2, llama3, and phi3
model_path = "/path/to/model"
# Note: temperature is a per-call sampling option, not a constructor argument;
# pass it when generating instead (the constructor silently ignores it).
base_llm = llama_cpp.Llama(model_path, seed=42, n_gpu_layers=-1, n_ctx=4096, verbose=False)
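A minimal usage sketch showing why re and json are imported: generate deterministically, then pull a JSON object out of the raw completion. The prompt and regex are assumptions, not from the original snippet:

prompt = "Return a JSON object with keys 'name' and 'year' for: the first Llama release."
result = base_llm(prompt, max_tokens=256, temperature=0.0)
text = result["choices"][0]["text"]
# Grab the first {...} block from the completion and parse it
match = re.search(r"\{.*\}", text, re.DOTALL)
data = json.loads(match.group(0)) if match else None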
// HypothesisEmbed.vue
// This is the component that mounts on the page that has the annotations.
// It's important to destroy the client when the component is unmounted,
// because if the component remounts, it creates duplicate annotations in the DOM.
<template>
  <div
    id="HypothesisEmbed"
    class="HypothesisEmbed"
  />
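The excerpt cuts off before the script block. A minimal sketch of the mount/unmount handling, assuming Vue 3 hook names; the 'destroy' event dispatched at the annotator link element follows Hypothesis's documented client-removal pattern, but treat the specifics as assumptions:

</template>

<script>
export default {
  mounted() {
    // Inject the Hypothesis client when the component mounts
    const script = document.createElement("script");
    script.src = "https://hypothes.is/embed.js";
    script.async = true;
    document.head.appendChild(script);
  },
  beforeUnmount() {
    // Hypothesis removes its client when a 'destroy' event is fired at the
    // annotator <link> element; without this, a remount injects a second
    // client and duplicates annotations in the DOM.
    const link = document.querySelector(
      'link[type="application/annotator+html"]'
    );
    if (link) {
      link.dispatchEvent(new Event("destroy"));
    }
  },
};
</script>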
import z from "zod";

export async function safeFetch<T>(
  schema: z.Schema<T>,
  input: RequestInfo,
  init?: RequestInit
): Promise<T> {
  const response = await fetch(input, init);
  if (!response.ok) {
    throw new Error(`safeFetch: ${response.status} ${response.statusText}`);
  }
  const json = await response.json();
  // Validate the payload against the caller's schema before returning it
  return schema.parse(json);
}
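A usage sketch; the schema and endpoint are made up for illustration:

const User = z.object({ id: z.number(), name: z.string() });

// Rejects if the request fails or the body doesn't match the schema
const user = await safeFetch(User, "https://api.example.com/users/1");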
// I'm tired of extensions that automatically:
// - show welcome pages / walkthroughs
// - show release notes
// - send telemetry
// - recommend things
//
// This disables all of that stuff.
// If you have more config, leave a comment so I can add it!!
{
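The excerpt ends at the opening brace. A few representative settings of the kind described (these keys are real VS Code settings, but this is a sketch, not the author's actual list):

  // Don't open walkthroughs or welcome pages on startup/install
  "workbench.startupEditor": "none",
  "workbench.welcomePage.walkthroughs.openOnInstall": false,
  // Don't pop release notes after updates
  "update.showReleaseNotes": false,
  // Turn off telemetry
  "telemetry.telemetryLevel": "off",
  // Stop extension recommendations
  "extensions.ignoreRecommendations": true
}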