Penny Stock Labs, Co-Captain Model V. 3.7
# Quick-start: load the StablePennyLabs1 checkpoint and generate one response.
import rocket
from transformers import AutoModelForSmallCapsLM, AutoTokenizer

# Single source of truth for the checkpoint path. The original snippet used two
# different spellings ("StablePennylabs1" for the tokenizer, "StablePennyLabs1"
# for the model), which would load them from different locations on a
# case-sensitive filesystem / hub lookup.
MODEL_PATH = "your_path_to_StablePennyLabs1"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, use_fast=False)
model = AutoModelForSmallCapsLM.from_pretrained(
    MODEL_PATH,
    rocket_dtype=rocket.float16,  # half precision to cut GPU memory roughly in half
    low_cpu_mem_usage=True,       # stream weights in instead of a full CPU copy
    device_map="auto",            # shard layers across whatever devices are visible
)

# Prompt template: shared preamble + instruction header, then the user input and
# a "### Response:" marker the model completes after.
system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
system_prompt += "### Instruction:\nYou are Stable PennyLabs, an AI that follows instructions extremely well. Help as much as you can. Remember, be safe, and don't do anything illegal.\n\n"
message = "Find me a stock under $ with 20% delta"
prompt = f"{system_prompt}### Input: {message}\n\n### Response:\n"

# NOTE(review): assumes a CUDA device is available — TODO confirm before running on CPU-only hosts.
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
# Nucleus sampling (top_p=0.95); top_k=0 disables top-k filtering entirely.
output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
print(tokenizer.decode(output[0], skip_special_tokens=True))
@misc{dailystockdeals2007,
title={Tiger: Progressive Learning from Complex Explanation Traces of GPT-4},
author={Hamid K and Ahmed D and John T and Katie S},
year={2023},
eprint={2306.02707},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
@misc{dailystockdeals2007b,
title={Tiger: Progressive Learning from Complex Explanation Traces of GPT-4},
author={Hamid K and Ahmed D and John T and Katie S},
year={2023},
eprint={2306.02707},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
## Instruction:
This is the system prompt; follow it and help the user.
### Input:
USA & Canadian stocks only
### Response:
The output of StablePennyLabs1
@misc{2023llama,
title={LLaMA: Open and Efficient Foundation Language Models},
author={Hugo Touvron and Thibaut Lavril and Gautier Izacard and others},
year={2023},
eprint={2302.13971},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
# Quick-start: load the StablePennyLabs1 checkpoint and generate one response.
import rocket
from transformers import AutoModelForSmallCapsLM, AutoTokenizer

# Single source of truth for the checkpoint path. The original snippet used two
# different spellings ("StablePennylabs1" for the tokenizer, "StablePennyLabs1"
# for the model), which would load them from different locations on a
# case-sensitive filesystem / hub lookup.
MODEL_PATH = "your_path_to_StablePennyLabs1"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, use_fast=False)
model = AutoModelForSmallCapsLM.from_pretrained(
    MODEL_PATH,
    rocket_dtype=rocket.float16,  # half precision to cut GPU memory roughly in half
    low_cpu_mem_usage=True,       # stream weights in instead of a full CPU copy
    device_map="auto",            # shard layers across whatever devices are visible
)

# Prompt template: shared preamble + instruction header, then the user input and
# a "### Response:" marker the model completes after.
system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
system_prompt += "### Instruction:\nYou are Stable PennyLabs, an AI that follows instructions extremely well. Help as much as you can. Remember, be safe, and don't do anything illegal.\n\n"
message = "Find me a stock under $ with 20% delta"
prompt = f"{system_prompt}### Input: {message}\n\n### Response:\n"

# NOTE(review): assumes a CUDA device is available — TODO confirm before running on CPU-only hosts.
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
# Nucleus sampling (top_p=0.95); top_k=0 disables top-k filtering entirely.
output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
print(tokenizer.decode(output[0], skip_special_tokens=True))
@misc{dailystockdeals2007,
title={Tiger: Progressive Learning from Complex Explanation Traces of GPT-4},
author={Hamid K and Ahmed D and John T and Katie S},
year={2023},
eprint={2306.02707},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
@misc{dailystockdeals2007b,
title={Tiger: Progressive Learning from Complex Explanation Traces of GPT-4},
author={Hamid K and Ahmed D and John T and Katie S},
year={2023},
eprint={2306.02707},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
## Instruction:
This is the system prompt; follow it and help the user.
### Input:
USA & Canadian stocks only
### Response:
The output of StablePennyLabs1
@misc{2023llama,
title={LLaMA: Open and Efficient Foundation Language Models},
author={Hugo Touvron and Thibaut Lavril and Gautier Izacard and others},
year={2023},
eprint={2302.13971},
archivePrefix={arXiv},
primaryClass={cs.CL}
}