Each of these commands will run an ad hoc HTTP static server in your current (or specified) directory, available at http://localhost:8000. Use this power wisely.
$ python -m SimpleHTTPServer 8000
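The SimpleHTTPServer module exists only on Python 2; on Python 3 the equivalent one-liner uses the http.server module:
$ python3 -m http.server 8000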
# For single-node, run this script via
# python -m torch.distributed.launch --nproc_per_node=<ngpus this node> example.py
#
# For multinode, see https://pytorch.org/docs/stable/distributed.html#launch-utility
#
# Example showing native mixed precision tools
# (torch.cuda.amp.GradScaler and torch.cuda.amp.autocast)
# used along with native DistributedDataParallel to perform
# gradient accumulation with allreduces only when stepping.
#
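# The full example itself is not reproduced here. Below is only a minimal,
# hypothetical sketch of the pattern the header describes: autocast for the
# forward pass, GradScaler for loss scaling, and DDP's no_sync() so the
# gradient allreduce fires only on iterations that actually step the optimizer.
# Names such as model, optimizer, loader, loss_fn, accum_steps and local_rank
# are assumptions, not taken from the original script.
import contextlib
import torch
from torch.nn.parallel import DistributedDataParallel as DDP

def train(model, optimizer, loader, loss_fn, accum_steps, local_rank):
    ddp_model = DDP(model.cuda(local_rank), device_ids=[local_rank])
    scaler = torch.cuda.amp.GradScaler()
    for i, (inputs, targets) in enumerate(loader):
        inputs, targets = inputs.cuda(local_rank), targets.cuda(local_rank)
        step_now = (i + 1) % accum_steps == 0
        # Skip the allreduce on pure accumulation iterations.
        sync_ctx = contextlib.nullcontext() if step_now else ddp_model.no_sync()
        with sync_ctx:
            with torch.cuda.amp.autocast():
                loss = loss_fn(ddp_model(inputs), targets) / accum_steps
            scaler.scale(loss).backward()
        if step_now:
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()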
#!/bin/bash
#
# This script configures XRDP for enhanced session mode on Arch Linux.
#
# The configuration is adapted from the Ubuntu 16.04 script.
#
# Script adapted from https://github.com/microsoft/linux-vm-tools
#
# MIT License
#
# Copyright (c) 2024 Alper Ahmetoglu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
import torch


def projected_gradient_descent(model, x, y, loss_fn, num_steps, step_size, step_norm, eps, eps_norm,
                               clamp=(0, 1), y_target=None):
    """Performs the projected gradient descent attack on a batch of images."""
    x_adv = x.clone().detach().requires_grad_(True).to(x.device)
    targeted = y_target is not None
    num_channels = x.shape[1]

    for i in range(num_steps):
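        # NOTE: the original snippet is truncated at this point. What follows
        # is only a minimal, hypothetical sketch of a typical PGD loop body,
        # assuming the l-infinity case for both step_norm and eps_norm; other
        # norms (and the num_channels bookkeeping they need) are omitted.
        _x_adv = x_adv.clone().detach().requires_grad_(True)
        loss = loss_fn(model(_x_adv), y_target if targeted else y)
        loss.backward()
        with torch.no_grad():
            # Ascend the loss (or descend it for a targeted attack).
            step = step_size * _x_adv.grad.sign()
            x_adv = x_adv - step if targeted else x_adv + step
            # Project back into the eps-ball around x, then into the valid pixel range.
            x_adv = torch.max(torch.min(x_adv, x + eps), x - eps).clamp(*clamp)

    return x_adv.detach()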
# coding: utf-8

# In[1]:

import math
import torch
from torch.nn.parameter import Parameter
import torch.nn.functional as F
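# The module defined in the original notebook is not shown above. As a purely
# hypothetical illustration of the pattern these imports set up (Parameter for
# learnable tensors, F for the functional op), here is a minimal
# re-implementation of a linear layer; the class name MyLinear is an assumption.
import torch.nn as nn

class MyLinear(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.weight = Parameter(torch.empty(out_features, in_features))
        self.bias = Parameter(torch.zeros(out_features))
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))

    def forward(self, x):
        # y = x W^T + b, computed with the functional API.
        return F.linear(x, self.weight, self.bias)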
import tensorflow as tf

# 1. Create and save two graphs

# c = a*b
g1 = tf.Graph()
with g1.as_default():
    a = tf.placeholder(tf.float32, name='a')
    b = tf.Variable(initial_value=tf.truncated_normal((1,)), name='b')
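    # The snippet is truncated above. A minimal, hypothetical continuation
    # (assuming TensorFlow 1.x, matching the tf.placeholder usage) finishes
    # the first graph and saves it; the op name 'c', the Saver, and the
    # checkpoint path './g1' are assumptions, not taken from the source.
    c = tf.multiply(a, b, name='c')
    init1 = tf.global_variables_initializer()
    saver1 = tf.train.Saver()

with tf.Session(graph=g1) as sess:
    sess.run(init1)
    print(sess.run(c, feed_dict={a: [2.0]}))  # evaluate c = a*b with a fed in
    saver1.save(sess, './g1')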
from contextlib import contextmanager
import numpy as np
import torch
from torch import Tensor, ByteTensor
import torch.nn.functional as F
from torch.autograd import Variable
import pycuda.driver
from pycuda.gl import graphics_map_flags
from glumpy import app, gloo, gl
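# Only the imports of this snippet are shown above. As a hedged sketch of the
# helper that torch/pycuda/glumpy interop code typically defines (and the
# likely reason contextmanager is imported), a mapping context for a registered
# GL texture might look as follows; cuda_activate and its argument img (assumed
# to be a pycuda.gl.RegisteredImage) are illustrative names, not from the source.
@contextmanager
def cuda_activate(img):
    """Map a pycuda.gl.RegisteredImage and yield the underlying CUDA array."""
    mapping = img.map()
    yield mapping.array(0, 0)
    mapping.unmap()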